[opengm] 34/386: Rename opengm::Parameters (aka ModelParameters) for learning to opengm::learning::Weights, and graphicalmodel/parameters.hxx to graphicalmodel/weights.hxx

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:35:01 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit de022360de7d040d70dbd3066d4210744ef82364
Author: Carsten Haubold <carstenhaubold at googlemail.com>
Date:   Mon Dec 15 13:51:03 2014 +0100

    Rename opengm::Parameters (aka ModelParameters) for learning to opengm::learning::Weights, and graphicalmodel/parameters.hxx to graphicalmodel/weights.hxx
---
 .../opengm/functions/function_properties_base.hxx  | 10 +--
 include/opengm/functions/l_potts.hxx               | 25 ++++----
 include/opengm/functions/learnable/lpotts.hxx      | 71 +++++++++++-----------
 .../opengm/functions/learnable/sum_of_experts.hxx  | 60 +++++++++---------
 include/opengm/functions/learnablefunction.hxx     | 16 ++---
 include/opengm/functions/potts.hxx                 |  4 +-
 include/opengm/graphicalmodel/parameters.hxx       | 44 --------------
 include/opengm/graphicalmodel/weights.hxx          | 44 ++++++++++++++
 include/opengm/learning/bundle-optimizer.hxx       | 30 ++++-----
 include/opengm/learning/dataset/dataset.hxx        | 15 ++---
 include/opengm/learning/dataset/testdataset.hxx    | 12 ++--
 include/opengm/learning/dataset/testdataset2.hxx   | 14 ++---
 include/opengm/learning/gridsearch-learning.hxx    | 40 ++++++------
 include/opengm/learning/struct-max-margin.hxx      | 10 +--
 src/unittest/test_gm_learning_functions.cxx        | 10 +--
 src/unittest/test_gridsearch_learner.cxx           |  4 +-
 src/unittest/test_learnable_functions.cxx          | 10 +--
 src/unittest/test_learning.cxx                     |  8 +--
 18 files changed, 214 insertions(+), 213 deletions(-)
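
The change is mechanical: every call site moves from the old opengm::Parameters API to the new opengm::learning::Weights API. A minimal before/after sketch, using only the accessors visible in the diff below (the main() wrapper and concrete values are illustrative):

    #include "opengm/graphicalmodel/weights.hxx"

    int main() {
        // before: opengm::Parameters<double, size_t> params(1);
        //         params.setParameter(0, 5.0);
        //         double v = params.getParameter(0);
        opengm::learning::Weights<double> weights(1); // one weight slot
        weights.setWeight(0, 5.0);                    // was setParameter
        double v = weights.getWeight(0);              // was getParameter; same as weights[0]
        return v == 5.0 ? 0 : 1;
    }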

diff --git a/include/opengm/functions/function_properties_base.hxx b/include/opengm/functions/function_properties_base.hxx
index 2b70a77..4561507 100644
--- a/include/opengm/functions/function_properties_base.hxx
+++ b/include/opengm/functions/function_properties_base.hxx
@@ -18,7 +18,7 @@
 #include "opengm/operations/adder.hxx"
 #include "opengm/operations/integrator.hxx"
 #include "opengm/operations/multiplier.hxx"
-#include "opengm/graphicalmodel/parameters.hxx"
+#include "opengm/graphicalmodel/weights.hxx"
 
 #define OPENGM_FLOAT_TOL 0.000001
 
@@ -127,15 +127,15 @@ public:
    FunctionShapeIteratorType functionShapeBegin() const;
    FunctionShapeIteratorType functionShapeEnd() const;
 
-   size_t numberOfParameters()const{
+   size_t numberOfWeights()const{
       return 0;
    }
-   INDEX parameterIndex(const size_t paramNumber)const{
+   INDEX weightIndex(const size_t weightNumber)const{
       throw RuntimeError("Function base has no parameters,this needs to be implemented in any function type");
    }
-   void setParameters(const Parameters<VALUE,INDEX>& parameters){}
+   void setWeights(const opengm::learning::Weights<VALUE>& weights){}
    template<class ITERATOR> 
-   VALUE parameterGradient(size_t,ITERATOR) const {return 0;}
+   VALUE weightGradient(size_t,ITERATOR) const {return 0;}
 
 };
 
diff --git a/include/opengm/functions/l_potts.hxx b/include/opengm/functions/l_potts.hxx
index 8694859..07d7a07 100644
--- a/include/opengm/functions/l_potts.hxx
+++ b/include/opengm/functions/l_potts.hxx
@@ -9,6 +9,7 @@
 #include "opengm/opengm.hxx"
 #include "opengm/functions/function_registration.hxx"
 #include "opengm/functions/function_properties_base.hxx"
+#include "opengm/graphicalmodel/weights.hxx"
 
 namespace opengm {
 
@@ -27,7 +28,7 @@ public:
    LPottsFunction(
       const LabelType,
       const LabelType,
-      const Parameters<ValueType,IndexType> & parameters,
+      const opengm::learning::Weights<ValueType> & parameters,
       const IndexType valueNotEqual
    );
    LabelType shape(const size_t) const;
@@ -45,10 +46,10 @@ public:
    MinMaxFunctor<ValueType> minMax() const;
 
    // parameters
-   size_t numberOfParameters()const{
+   size_t numberOfWeights()const{
       return 1;
    }
-   IndexType parameterIndex(const size_t paramNumber)const{
+   IndexType weightIndex(const size_t paramNumber)const{
       return piValueNotEqual_;
    }
 
@@ -57,7 +58,7 @@ private:
    LabelType numberOfLabels1_;
    LabelType numberOfLabels2_;
 
-   const Parameters<ValueType,IndexType> * params_;
+   const opengm::learning::Weights<ValueType> * weights_;
 
    IndexType piValueNotEqual_;
 
@@ -82,12 +83,12 @@ LPottsFunction<T, I, L>::LPottsFunction
 (
    const L numberOfLabels1,
    const L numberOfLabels2,
-   const Parameters<ValueType,IndexType> & parameters,
+   const opengm::learning::Weights<ValueType> & weights,
    const IndexType valueNotEqual
 )
 :  numberOfLabels1_(numberOfLabels1),
    numberOfLabels2_(numberOfLabels2),
-   params_(&parameters),
+   weights_(&weights),
    piValueNotEqual_(valueNotEqual)
 {}
 
@@ -99,7 +100,7 @@ LPottsFunction<T, I, L>::operator()
    ITERATOR begin
 ) const {
    return (begin[0]==begin[1] ? 
-      static_cast<ValueType>(0.0) : params_->getParameter(piValueNotEqual_) );
+      static_cast<ValueType>(0.0) : weights_->getWeight(piValueNotEqual_) );
 }
 
 
@@ -157,7 +158,7 @@ template<class T, class I, class L>
 inline typename LPottsFunction<T, I, L>::ValueType
 LPottsFunction<T, I, L>::min() const
 {
-   const T val = params_->getParameter(piValueNotEqual_);
+   const T val = weights_->getWeight(piValueNotEqual_);
    return 0.0<val ? 0.0 :val;
 }
 
@@ -165,7 +166,7 @@ template<class T, class I, class L>
 inline typename LPottsFunction<T, I, L>::ValueType
 LPottsFunction<T, I, L>::max() const
 {
-  const T val = params_->getParameter(piValueNotEqual_);
+  const T val = weights_->getWeight(piValueNotEqual_);
   return 0.0>val ? 0.0 :val;
 }
 
@@ -173,7 +174,7 @@ template<class T, class I, class L>
 inline typename LPottsFunction<T, I, L>::ValueType
 LPottsFunction<T, I, L>::sum() const
 {
-    const T val = params_->getParameter(piValueNotEqual_);
+    const T val = weights_->getWeight(piValueNotEqual_);
     const LabelType minLabels = std::min(numberOfLabels1_, numberOfLabels2_);
     return val * static_cast<T>(numberOfLabels1_ * numberOfLabels2_ - minLabels);
 }
@@ -190,10 +191,10 @@ inline MinMaxFunctor<typename LPottsFunction<T, I, L>::ValueType>
 LPottsFunction<T, I, L>::minMax() const
 {
    if(static_cast<ValueType>(0) < piValueNotEqual_) {
-      return MinMaxFunctor<T>(static_cast<ValueType>(0), params_[piValueNotEqual_]);
+      return MinMaxFunctor<T>(static_cast<ValueType>(0), weights_[piValueNotEqual_]);
    }
    else {
-      return MinMaxFunctor<T>(params_[piValueNotEqual_], static_cast<ValueType>(0));
+      return MinMaxFunctor<T>(weights_[piValueNotEqual_], static_cast<ValueType>(0));
    }
 }
 
diff --git a/include/opengm/functions/learnable/lpotts.hxx b/include/opengm/functions/learnable/lpotts.hxx
index 6bc627d..0c2c9ba 100644
--- a/include/opengm/functions/learnable/lpotts.hxx
+++ b/include/opengm/functions/learnable/lpotts.hxx
@@ -9,6 +9,7 @@
 #include "opengm/opengm.hxx"
 #include "opengm/functions/function_registration.hxx"
 #include "opengm/functions/function_properties_base.hxx"
+#include "opengm/graphicalmodel/weights.hxx"
 
 namespace opengm {
 namespace functions {
@@ -34,15 +35,13 @@ public:
    typedef I IndexType;
  
    LPotts();
-   LPotts(
-      const Parameters<T,I>& parameters,
+   LPotts(const opengm::learning::Weights<T>& weights,
       const L numLabels,
-      const std::vector<size_t>& parameterIDs,
+      const std::vector<size_t>& weightIDs,
       const std::vector<T>& feat
-      ); 
-   LPotts(
-      const L numLabels,
-      const std::vector<size_t>& parameterIDs,
+      );
+   LPotts(const L numLabels,
+      const std::vector<size_t>& weightIDs,
       const std::vector<T>& feat
       );
    L shape(const size_t) const;
@@ -51,19 +50,19 @@ public:
    template<class ITERATOR> T operator()(ITERATOR) const;
  
    // parameters
-   void setParameters(const Parameters<T,I>& parameters)
-      {parameters_ = &parameters;}
-   size_t numberOfParameters()const
-     {return parameterIDs_.size();}
-   I parameterIndex(const size_t paramNumber) const
-     {return parameterIDs_[paramNumber];} //dummy
+   void setWeights(const opengm::learning::Weights<T>& weights)
+      {weights_ = &weights;}
+   size_t numberOfWeights()const
+     {return weightIDs_.size();}
+   I weightIndex(const size_t weightNumber) const
+     {return weightIDs_[weightNumber];} //dummy
    template<class ITERATOR> 
-   T parameterGradient(size_t,ITERATOR) const;
+   T weightGradient(size_t,ITERATOR) const;
 
 protected:
-   const Parameters<T,I> * parameters_;
+   const opengm::learning::Weights<T> * weights_;
    L numLabels_;
-   std::vector<size_t> parameterIDs_;
+   std::vector<size_t> weightIDs_;
    std::vector<T> feat_;
 
 
@@ -75,14 +74,14 @@ template <class T, class I, class L>
 inline
 LPotts<T, I, L>::LPotts
 ( 
-   const Parameters<T,I>& parameters,
+   const opengm::learning::Weights<T>& weights,
    const L numLabels,
-   const std::vector<size_t>& parameterIDs,
+   const std::vector<size_t>& weightIDs,
    const std::vector<T>& feat
    )
-   :  parameters_(&parameters), numLabels_(numLabels), parameterIDs_(parameterIDs),feat_(feat)
+   :  weights_(&weights), numLabels_(numLabels), weightIDs_(weightIDs),feat_(feat)
 {
-  OPENGM_ASSERT( parameterIDs_.size()==feat_.size() );
+  OPENGM_ASSERT( weightIDs_.size()==feat_.size() );
 }
 
 template <class T, class I, class L>
@@ -90,35 +89,35 @@ inline
 LPotts<T, I, L>::LPotts
 ( 
    const L numLabels,
-   const std::vector<size_t>& parameterIDs,
+   const std::vector<size_t>& weightIDs,
    const std::vector<T>& feat
    )
-   : numLabels_(numLabels), parameterIDs_(parameterIDs),feat_(feat)
+   : numLabels_(numLabels), weightIDs_(weightIDs),feat_(feat)
 {
-  OPENGM_ASSERT( parameterIDs_.size()==feat_.size() );
+  OPENGM_ASSERT( weightIDs_.size()==feat_.size() );
 }
 
 template <class T, class I, class L>
 inline
 LPotts<T, I, L>::LPotts
 ( )
-   : numLabels_(0), parameterIDs_(std::vector<size_t>(0)), feat_(std::vector<T>(0))
+   : numLabels_(0), weightIDs_(std::vector<size_t>(0)), feat_(std::vector<T>(0))
 {
-  OPENGM_ASSERT( parameterIDs_.size()==feat_.size() );
+  OPENGM_ASSERT( weightIDs_.size()==feat_.size() );
 }
 
 
 template <class T, class I, class L>
 template <class ITERATOR>
 inline T
-LPotts<T, I, L>::parameterGradient 
+LPotts<T, I, L>::weightGradient 
 (
-   size_t parameterNumber,
+   size_t weightNumber,
    ITERATOR begin
 ) const {
-  OPENGM_ASSERT(parameterNumber< numberOfParameters());
+  OPENGM_ASSERT(weightNumber< numberOfWeights());
   if( *(begin) != *(begin+1) )
-    return (*this).feat_[parameterNumber];
+    return (*this).feat_[weightNumber];
   return 0;
 }
 
@@ -130,8 +129,8 @@ LPotts<T, I, L>::operator()
    ITERATOR begin
 ) const {
    T val = 0;
-   for(size_t i=0;i<numberOfParameters();++i){
-      val += parameters_->getParameter(i) * parameterGradient(i,begin);
+   for(size_t i=0;i<numberOfWeights();++i){
+      val += weights_->getWeight(i) * weightGradient(i,begin);
    }
    return val;
 }
@@ -189,7 +188,7 @@ FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> >::indexSequ
 (
    const opengm::functions::learnable::LPotts<T, I, L> & src
 ) {
-  return 2+src.parameterIDs_.size();
+  return 2+src.weightIDs_.size();
 }
 
 template<class T, class I, class L>
@@ -214,8 +213,8 @@ FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> >::serialize
    ++indexOutIterator; 
    *indexOutIterator = src.feat_.size();
    ++indexOutIterator;
-   for(size_t i=0; i<src.parameterIDs_.size();++i){
-     *indexOutIterator = src.parameterIndex(i);
+   for(size_t i=0; i<src.weightIDs_.size();++i){
+     *indexOutIterator = src.weightIndex(i);
      ++indexOutIterator;
    } 
    for(size_t i=0; i<src.feat_.size();++i){
@@ -237,10 +236,10 @@ FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> >::deseriali
    ++ indexInIterator;
    const size_t numW=*indexInIterator;
    dst.feat_.resize(numW);
-   dst.parameterIDs_.resize(numW);
+   dst.weightIDs_.resize(numW);
    for(size_t i=0; i<numW;++i){
      dst.feat_[i]=*valueInIterator;
-     dst.parameterIDs_[i]=*indexInIterator;
+     dst.weightIDs_[i]=*indexInIterator;
      ++indexInIterator;
      ++valueInIterator;
    }
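
As the operator() hunk above shows, the learnable Potts function evaluates to the dot product of the referenced weights with the per-weight gradients, and the gradient for weight i is feat_[i] whenever the two labels differ. A minimal sketch of construction and evaluation, mirroring the usage in the test datasets further down (the concrete values are illustrative):

    #include <cstddef>
    #include <vector>
    #include "opengm/graphicalmodel/weights.hxx"
    #include "opengm/functions/learnable/lpotts.hxx"

    int main() {
        opengm::learning::Weights<double> weights(1);
        weights.setWeight(0, 3.0);
        // two labels, weight id 0, feature value 1.0
        opengm::functions::learnable::LPotts<double, std::size_t, std::size_t>
            f(weights, 2, std::vector<std::size_t>(1, 0), std::vector<double>(1, 1.0));
        std::size_t equal[]    = {0, 0};  // f(equal)    == 0
        std::size_t notEqual[] = {0, 1};  // f(notEqual) == 3.0 * 1.0
        return (f(equal) == 0.0 && f(notEqual) == 3.0) ? 0 : 1;
    }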
diff --git a/include/opengm/functions/learnable/sum_of_experts.hxx b/include/opengm/functions/learnable/sum_of_experts.hxx
index 5f4e54f..cfca910 100644
--- a/include/opengm/functions/learnable/sum_of_experts.hxx
+++ b/include/opengm/functions/learnable/sum_of_experts.hxx
@@ -10,6 +10,7 @@
 #include "opengm/functions/function_registration.hxx"
 #include "opengm/functions/function_properties_base.hxx"
 #include "opengm/datastructures/marray/marray.hxx"
+#include "opengm/graphicalmodel/weights.hxx"
 
 namespace opengm {
 namespace functions {
@@ -33,12 +34,11 @@ public:
    typedef I IndexType;
  
    SumOfExperts();
-   SumOfExperts( 
-      const std::vector<L>& shape,
-      const Parameters<T,I>& parameters,
-      const std::vector<size_t>& parameterIDs,
+   SumOfExperts(const std::vector<L>& shape,
+      const opengm::learning::Weights<T>& weights,
+      const std::vector<size_t>& weightIDs,
       const std::vector<marray::Marray<T> >& feat
-      ); 
+      );
  
    L shape(const size_t) const;
    size_t size() const;
@@ -46,19 +46,19 @@ public:
    template<class ITERATOR> T operator()(ITERATOR) const;
  
    // parameters
-   void setParameters(const Parameters<T,I>& parameters)
-      {parameters_ = &parameters;}
-   size_t numberOfParameters()const
-     {return parameterIDs_.size();}
-   I parameterIndex(const size_t paramNumber) const
-     {return parameterIDs_[paramNumber];} //dummy
+   void setWeights(const opengm::learning::Weights<T>& weights)
+      {weights_ = &weights;}
+   size_t numberOfWeights()const
+     {return weightIDs_.size();}
+   I weightIndex(const size_t weightNumber) const
+     {return weightIDs_[weightNumber];} //dummy
    template<class ITERATOR> 
-   T parameterGradient(size_t,ITERATOR) const;
+   T weightGradient(size_t,ITERATOR) const;
 
 protected:
-   const Parameters<T,I>*                  parameters_;
+   const opengm::learning::Weights<T>*                  weights_;
    std::vector<L>                          shape_;
-   std::vector<size_t>                     parameterIDs_;
+   std::vector<size_t>                     weightIDs_;
    std::vector<marray::Marray<T> > feat_;
 
    friend class opengm::FunctionSerialization<opengm::functions::learnable::SumOfExperts<T, I, L> >;
@@ -70,20 +70,20 @@ inline
 SumOfExperts<T, I, L>::SumOfExperts
 ( 
    const std::vector<L>&                           shape,
-   const Parameters<T,I>&                          parameters,
-   const std::vector<size_t>&                      parameterIDs,
+   const opengm::learning::Weights<T>&                          weights,
+   const std::vector<size_t>&                      weightIDs,
    const std::vector<marray::Marray<T> >&  feat
    )
-   :   shape_(shape), parameters_(&parameters), parameterIDs_(parameterIDs),feat_(feat)
+   :   shape_(shape), weights_(&weights), weightIDs_(weightIDs),feat_(feat)
 {
    OPENGM_ASSERT( size() == feat_[0].size() );
-   OPENGM_ASSERT( parameterIDs_.size() == feat_.size() );
+   OPENGM_ASSERT( weightIDs_.size() == feat_.size() );
 }
 
 template <class T, class I, class L>
 inline
 SumOfExperts<T, I, L>::SumOfExperts()
-   : shape_(std::vector<L>(0)), parameterIDs_(std::vector<size_t>(0)), feat_(std::vector<marray::Marray<T> >(0))
+   : shape_(std::vector<L>(0)), weightIDs_(std::vector<size_t>(0)), feat_(std::vector<marray::Marray<T> >(0))
 {
    ;
 }
@@ -92,13 +92,13 @@ SumOfExperts<T, I, L>::SumOfExperts()
 template <class T, class I, class L>
 template <class ITERATOR>
 inline T
-SumOfExperts<T, I, L>::parameterGradient 
+SumOfExperts<T, I, L>::weightGradient 
 (
-   size_t parameterNumber,
+   size_t weightNumber,
    ITERATOR begin
 ) const {
-  OPENGM_ASSERT(parameterNumber< numberOfParameters());
-  return feat_[parameterNumber](begin);
+  OPENGM_ASSERT(weightNumber< numberOfWeights());
+  return feat_[weightNumber](begin);
 }
 
 template <class T, class I, class L>
@@ -109,8 +109,8 @@ SumOfExperts<T, I, L>::operator()
    ITERATOR begin
 ) const {
    T val = 0;
-   for(size_t i=0;i<numberOfParameters();++i){
-      val += parameters_->getParameter(i) * parameterGradient(i,begin);
+   for(size_t i=0;i<numberOfWeights();++i){
+      val += weights_->getWeight(i) * weightGradient(i,begin);
    }
    return val;
 }
@@ -171,7 +171,7 @@ FunctionSerialization<opengm::functions::learnable::SumOfExperts<T, I, L> >::ind
 (
    const opengm::functions::learnable::SumOfExperts<T, I, L> & src
 ) {
-   return 1+src.shape_.size()+1+src.parameterIDs_.size();
+   return 1+src.shape_.size()+1+src.weightIDs_.size();
 }
 
 template<class T, class I, class L>
@@ -200,15 +200,15 @@ FunctionSerialization<opengm::functions::learnable::SumOfExperts<T, I, L> >::ser
       ++indexOutIterator; 
    }
    //save parameter ids
-   *indexOutIterator = src.parameterIDs_.size();
+   *indexOutIterator = src.weightIDs_.size();
    ++indexOutIterator; 
-   for(size_t i=0; i<src.parameterIDs_.size();++i){
-      *indexOutIterator = src.parameterIDs_[i];
+   for(size_t i=0; i<src.weightIDs_.size();++i){
+      *indexOutIterator = src.weightIDs_[i];
       ++indexOutIterator; 
    }
 
    // save features  
-   for(size_t i=0; i<src.parameterIDs_.size();++i){
+   for(size_t i=0; i<src.weightIDs_.size();++i){
       for(size_t j=0; j<src.feat_[i].size();++j){
          *valueOutIterator = src.feat_[i](j);
          ++valueOutIterator;
diff --git a/include/opengm/functions/learnablefunction.hxx b/include/opengm/functions/learnablefunction.hxx
index 394c011..45bc999 100644
--- a/include/opengm/functions/learnablefunction.hxx
+++ b/include/opengm/functions/learnablefunction.hxx
@@ -33,7 +33,7 @@ public:
    typedef I IndexType;
 
    LearnableFeatureFunction(
-      const Parameters<T,I>& parameters,
+      const opengm::learning::Weights<T>& parameters,
       const std::vector<L>& shape,
       const std::vector<size_t>& parameterIDs,
       const std::vector<T>& feat
@@ -44,15 +44,15 @@ public:
    template<class ITERATOR> T operator()(ITERATOR) const;
  
    // parameters
-   size_t numberOfParameters()const
+   size_t numberOfWeights()const
      {return parameterIDs_.size();}
-   I parameterIndex(const size_t paramNumber) const
+   I weightIndex(const size_t paramNumber) const
      {return parameterIDs_[paramNumber];} //dummy
    template<class ITERATOR> 
    T paramaterGradient(size_t,ITERATOR) const;
 
 protected:
-   const Parameters<T,I> * parameters_;
+   const opengm::learning::Weights<T> * parameters_;
    const std::vector<L> shape_;
    const std::vector<size_t> parameterIDs_;
    const std::vector<T> feat_;
@@ -73,7 +73,7 @@ template <class T, class I, class L>
 inline
 LearnableFeatureFunction<T, I, L>::LearnableFeatureFunction
 ( 
-   const Parameters<T,I>& parameters,
+   const opengm::learning::Weights<T>& parameters,
    const std::vector<L>& shape,
    const std::vector<size_t>& parameterIDs,
    const std::vector<T>& feat
@@ -89,7 +89,7 @@ LearnableFeatureFunction<T, I, L>::paramaterGradient
    size_t parameterNumber,
    ITERATOR begin
 ) const {
-   OPENGM_ASSERT(parameterNumber< numberOfParameters());
+   OPENGM_ASSERT(parameterNumber< numberOfWeights());
    return 0; // need to be implemented
 }
 
@@ -102,8 +102,8 @@ LearnableFeatureFunction<T, I, L>::operator()
    ITERATOR begin
 ) const {
    T val = 0;
-   for(size_t i=0;i<numberOfParameters();++i){
-      val += parameters_->getParameter(i) * paramaterGradient(i,begin);
+   for(size_t i=0;i<numberOfWeights();++i){
+      val += parameters_->getWeight(i) * paramaterGradient(i,begin);
    }
 }
 
diff --git a/include/opengm/functions/potts.hxx b/include/opengm/functions/potts.hxx
index 092887f..cb061f9 100644
--- a/include/opengm/functions/potts.hxx
+++ b/include/opengm/functions/potts.hxx
@@ -33,7 +33,7 @@ public:
    bool operator==(const PottsFunction& ) const;
    ValueType valueEqual() const;
    ValueType valueNotEqual() const;
-   IndexType numberOfParameters() const;
+   IndexType numberOfWeights() const;
    ValueType parameter(const size_t index) const;
    ValueType& parameter(const size_t index);
 
@@ -211,7 +211,7 @@ PottsFunction<T, I, L>::operator==
 
 template<class T, class I, class L>
 inline typename PottsFunction<T, I, L>::IndexType
-PottsFunction<T, I, L>::numberOfParameters() const
+PottsFunction<T, I, L>::numberOfWeights() const
 {
    return 2;
 }
diff --git a/include/opengm/graphicalmodel/parameters.hxx b/include/opengm/graphicalmodel/parameters.hxx
deleted file mode 100644
index 22d9aa5..0000000
--- a/include/opengm/graphicalmodel/parameters.hxx
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef OPENGM_PARAMETERS
-#define OPENGM_PARAMETERS
-
-
-namespace opengm{
-
-   template<class T,class I>
-   class Parameters{
-   public:
-      typedef T ValueType;
-      typedef I IndexType;
-
-
-      Parameters(const IndexType numberOfParameters=0)
-      : params_(numberOfParameters){
-
-      }
-
-      ValueType getParameter(const size_t pi)const{
-         OPENGM_ASSERT_OP(pi,<,params_.size());
-         return params_[pi];
-      }
-
-      void setParameter(const size_t pi,const ValueType value){
-         OPENGM_ASSERT_OP(pi,<,params_.size());
-         params_[pi]=value;
-      }
-
-      ValueType operator[](const size_t pi)const{
-         return getParameter(pi);
-      }
-
-      size_t numberOfParameters()const{
-         return params_.size();
-      }
-
-   private:
-
-      std::vector<ValueType> params_;
-   };
-}
-
-
-#endif /* OPENGM_PARAMETERS */
\ No newline at end of file
diff --git a/include/opengm/graphicalmodel/weights.hxx b/include/opengm/graphicalmodel/weights.hxx
new file mode 100644
index 0000000..d9246a8
--- /dev/null
+++ b/include/opengm/graphicalmodel/weights.hxx
@@ -0,0 +1,44 @@
+#ifndef OPENGM_PARAMETERS
+#define OPENGM_PARAMETERS
+
+
+namespace opengm{
+namespace learning{
+
+   template<class T>
+   class Weights{
+   public:
+      typedef T ValueType;
+
+      Weights(const size_t numberOfParameters=0)
+      : weights_(numberOfParameters){
+
+      }
+
+      ValueType getWeight(const size_t pi)const{
+         OPENGM_ASSERT_OP(pi,<,weights_.size());
+         return weights_[pi];
+      }
+
+      void setWeight(const size_t pi,const ValueType value){
+         OPENGM_ASSERT_OP(pi,<,weights_.size());
+         weights_[pi]=value;
+      }
+
+      ValueType operator[](const size_t pi)const{
+         return getWeight(pi);
+      }
+
+      size_t numberOfWeights()const{
+         return weights_.size();
+      }
+
+   private:
+
+      std::vector<ValueType> weights_;
+   };
+} // namespace learning
+} // namespace opengm
+
+
+#endif /* OPENGM_PARAMETERS */
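
Functions hold a pointer to the shared Weights object rather than a copy (see the weights_ members throughout this diff), so a setWeight call is immediately visible to every function referencing the container. A minimal sketch of that sharing, mirroring the assertions in test_gm_learning_functions.cxx below:

    #include <cstddef>
    #include "opengm/graphicalmodel/weights.hxx"
    #include "opengm/functions/l_potts.hxx"

    int main() {
        opengm::learning::Weights<double> weights(1);
        weights.setWeight(0, 5.0);
        // LPottsFunction stores &weights, not a copy
        opengm::LPottsFunction<double, std::size_t, std::size_t> f(2, 2, weights, 0);
        std::size_t notEqual[] = {0, 1};
        double before = f(notEqual);  // 5.0
        weights.setWeight(0, 3.0);    // no call on f needed
        double after = f(notEqual);   // 3.0
        return (before == 5.0 && after == 3.0) ? 0 : 1;
    }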
diff --git a/include/opengm/learning/bundle-optimizer.hxx b/include/opengm/learning/bundle-optimizer.hxx
index 447faa4..aae8df9 100644
--- a/include/opengm/learning/bundle-optimizer.hxx
+++ b/include/opengm/learning/bundle-optimizer.hxx
@@ -50,8 +50,8 @@ public:
 	 * Start the bundle method optimization on the given oracle. The oracle has 
 	 * to model:
 	 *
-	 *   ModelParameters current;
-	 *   ModelParameters gradient;
+     *   Weights current;
+     *   Weights gradient;
 	 *   double          value;
 	 *
 	 *   valueAndGradient = oracle(current, value, gradient);
@@ -59,13 +59,13 @@ public:
 	 * and should return the value and gradient of the objective function 
 	 * (passed by reference) at point 'current'.
 	 */
-	template <typename Oracle, typename ModelParameters>
-	OptimizerResult optimize(Oracle& oracle, ModelParameters& w);
+    template <typename Oracle, typename Weights>
+    OptimizerResult optimize(Oracle& oracle, Weights& w);
 
 private:
 
-	template <typename ModelParameters>
-	void setupQp(const ModelParameters& w);
+    template <typename Weights>
+    void setupQp(const Weights& w);
 
 	void findMinLowerBound(std::vector<ValueType>& w, ValueType& value);
 
@@ -89,9 +89,9 @@ BundleOptimizer<T>::~BundleOptimizer() {
 }
 
 template <typename T>
-template <typename Oracle, typename ModelParameters>
+template <typename Oracle, typename Weights>
 OptimizerResult
-BundleOptimizer<T>::optimize(Oracle& oracle, ModelParameters& w) {
+BundleOptimizer<T>::optimize(Oracle& oracle, Weights& w) {
 
 	setupQp(w);
 
@@ -119,7 +119,7 @@ BundleOptimizer<T>::optimize(Oracle& oracle, ModelParameters& w) {
 
 		std::cout << std::endl << "----------------- iteration " << t << std::endl;
 
-		ModelParameters w_tm1 = w;
+        Weights w_tm1 = w;
 
 		//std::cout << "current w is " << w_tm1 << std::endl;
 
@@ -127,7 +127,7 @@ BundleOptimizer<T>::optimize(Oracle& oracle, ModelParameters& w) {
 		T L_w_tm1 = 0.0;
 
 		// gradient of L at current w
-		ModelParameters a_t(w.numberOfParameters());
+        Weights a_t(w.numberOfWeights());
 
 		// get current value and gradient
 		oracle(w_tm1, L_w_tm1, a_t);
@@ -182,9 +182,9 @@ BundleOptimizer<T>::optimize(Oracle& oracle, ModelParameters& w) {
 }
 
 template <typename T>
-template <typename ModelParameters>
+template <typename Weights>
 void
-BundleOptimizer<T>::setupQp(const ModelParameters& w) {
+BundleOptimizer<T>::setupQp(const Weights& w) {
 
 	/*
 	  w* = argmin λ½|w|² + ξ, s.t. <w,a_i> + b_i ≤ ξ ∀i
@@ -194,14 +194,14 @@ BundleOptimizer<T>::setupQp(const ModelParameters& w) {
 		_solver = solver::QuadraticSolverFactory::Create();
 
 	// one variable for each component of w and for ξ
-	solver::QuadraticObjective obj(w.numberOfParameters() + 1);
+    solver::QuadraticObjective obj(w.numberOfWeights() + 1);
 
 	// regularizer
-	for (unsigned int i = 0; i < w.numberOfParameters(); i++)
+    for (unsigned int i = 0; i < w.numberOfWeights(); i++)
 		obj.setQuadraticCoefficient(i, i, 0.5*_parameter.lambda);
 
 	// ξ
-	obj.setCoefficient(w.numberOfParameters(), 1.0);
+    obj.setCoefficient(w.numberOfWeights(), 1.0);
 
 	// we minimize
 	obj.setSense(solver::Minimize);
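
The oracle contract documented at the top of this file is duck-typed: any object callable as oracle(current, value, gradient) with the Weights type works, matching the Oracle nested in struct-max-margin.hxx below. A minimal conforming oracle for a toy objective L(w) = (w[0] - 2)^2 (the objective itself is illustrative, not part of this commit):

    #include "opengm/graphicalmodel/weights.hxx"

    struct QuadraticOracle {
        // value and gradient are returned by reference, as documented above
        void operator()(const opengm::learning::Weights<double>& current,
                        double& value,
                        opengm::learning::Weights<double>& gradient) {
            const double d = current.getWeight(0) - 2.0;
            value = d * d;                  // L(w)
            gradient.setWeight(0, 2.0 * d); // dL/dw
        }
    };

    // usage sketch: opengm::learning::Weights<double> w(1);
    //               QuadraticOracle oracle;
    //               optimizer.optimize(oracle, w); // moves w[0] toward 2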
diff --git a/include/opengm/learning/dataset/dataset.hxx b/include/opengm/learning/dataset/dataset.hxx
index 6d4752f..e1b45a2 100644
--- a/include/opengm/learning/dataset/dataset.hxx
+++ b/include/opengm/learning/dataset/dataset.hxx
@@ -5,6 +5,7 @@
 #include <vector>
 #include <cstdlib>
 
+#include "../../graphicalmodel/weights.hxx"
 
 namespace opengm {
    namespace datasets{
@@ -16,12 +17,12 @@ namespace opengm {
          typedef typename GM::ValueType ValueType;
          typedef typename GM::IndexType IndexType;
          typedef typename GM::LabelType LabelType; 
-         typedef opengm::Parameters<ValueType,IndexType> ModelParameters;
+         typedef opengm::learning::Weights<ValueType> Weights;
 
          GM&                           getModel(const size_t i)  { return gms_[i]; }
          const std::vector<LabelType>& getGT(const size_t i)     { return gt_; }
-         ModelParameters&              getModelParameters()      { return modelParameters_; }
-         size_t                        getNumberOfParameters()   { return 1; }
+         Weights&                      getWeights()              { return weights_; }
+         size_t                        getNumberOfWeights()      { return 1; }
          size_t                        getNumberOfModels()       { return gms_.size(); } 
          
          Dataset();
@@ -30,7 +31,7 @@ namespace opengm {
       private:
          std::vector<GM> gms_; 
          std::vector<std::vector<LabelType> > gt_; 
-         ModelParameters modelParameters_;
+         Weights weights_;
       };
       
 
@@ -39,7 +40,7 @@ namespace opengm {
       Dataset<GM>::Dataset()
          :  gms_(std::vector<GM>(0)),
             gt_(std::vector<std::vector<LabelType> >(0)),
-            modelParameters_(ModelParameters(0))
+            weights_(Weights(0))
       {
       }; 
 
@@ -52,14 +53,14 @@ namespace opengm {
          hid_t file =  marray::hdf5::openFile(hss.str());
          std::vector<size_t> temp(1);
          marray::hdf5::loadVec(file, "numberOfParameters", temp);
-         size_t numPara = temp[0];
+         size_t numWeights = temp[0];
          marray::hdf5::loadVec(file, "numberOfModels", temp);
          size_t numModel = temp[0];
          marray::hdf5::closeFile(file);
          
          gms_.resize(numModel);
          gt_.resize(numModel);
-         modelParameters_ = ModelParameters(numPara);
+         weights_ = Weights(numWeights);
          //Load Models and ground truth
          for(size_t m=0; m<numModel; ++m){
             std::stringstream ss;
diff --git a/include/opengm/learning/dataset/testdataset.hxx b/include/opengm/learning/dataset/testdataset.hxx
index 8504946..4e807eb 100644
--- a/include/opengm/learning/dataset/testdataset.hxx
+++ b/include/opengm/learning/dataset/testdataset.hxx
@@ -17,12 +17,12 @@ namespace opengm {
          typedef typename GM::ValueType ValueType;
          typedef typename GM::IndexType IndexType;
          typedef typename GM::LabelType LabelType; 
-         typedef opengm::Parameters<ValueType,IndexType> ModelParameters;
+         typedef opengm::learning::Weights<ValueType> Weights;
 
          GM&                           getModel(const size_t i)  { return gms_[i]; }
          const std::vector<LabelType>& getGT(const size_t i)     { return gt_; }
-         ModelParameters&              getModelParameters()      { return modelParameters_; }
-         size_t                        getNumberOfParameters()   { return 1; }
+         Weights&                      getWeights()              { return weights_; }
+         size_t                        getNumberOfWeights()      { return 1; }
          size_t                        getNumberOfModels()       { return gms_.size(); } 
          
          TestDataset(size_t numModels=10); 
@@ -30,14 +30,14 @@ namespace opengm {
       private:
          std::vector<GM> gms_; 
          std::vector<LabelType> gt_; 
-         ModelParameters modelParameters_;
+         Weights weights_;
       };
       
 
 
       template<class GM>
       TestDataset<GM>::TestDataset(size_t numModels)
-         : modelParameters_(ModelParameters(1))
+         : weights_(Weights(1))
       {
          LabelType numberOfLabels = 2;
          gt_.resize(64*64,0);
@@ -65,7 +65,7 @@ namespace opengm {
                }
             }
           
-            opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType> f(modelParameters_,2,std::vector<size_t>(1,0),std::vector<ValueType>(1,1));
+            opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType> f(weights_,2,std::vector<size_t>(1,0),std::vector<ValueType>(1,1));
             typename GM::FunctionIdentifier fid = gms_[m].addFunction(f);      
             for(size_t y = 0; y < 64; ++y){ 
                for(size_t x = 0; x < 64; ++x) {
diff --git a/include/opengm/learning/dataset/testdataset2.hxx b/include/opengm/learning/dataset/testdataset2.hxx
index 630f8b4..06ba46f 100644
--- a/include/opengm/learning/dataset/testdataset2.hxx
+++ b/include/opengm/learning/dataset/testdataset2.hxx
@@ -18,12 +18,12 @@ namespace opengm {
          typedef typename GM::ValueType ValueType;
          typedef typename GM::IndexType IndexType;
          typedef typename GM::LabelType LabelType; 
-         typedef opengm::Parameters<ValueType,IndexType> ModelParameters;
+         typedef opengm::learning::Weights<ValueType> Weights;
 
          GM&                           getModel(const size_t i)  { return gms_[i]; }
          const std::vector<LabelType>& getGT(const size_t i)     { return gt_; }
-         ModelParameters&              getModelParameters()      { return modelParameters_; }
-         size_t                        getNumberOfParameters()   { return 3; }
+         Weights&                      getWeights()              { return weights_; }
+         size_t                        getNumberOfWeights()      { return 3; }
          size_t                        getNumberOfModels()       { return gms_.size(); } 
          
          TestDataset2(size_t numModels=4); 
@@ -31,14 +31,14 @@ namespace opengm {
       private:
          std::vector<GM> gms_; 
          std::vector<LabelType> gt_; 
-         ModelParameters modelParameters_;
+         Weights weights_;
       };
       
 
 
       template<class GM>
       TestDataset2<GM>::TestDataset2(size_t numModels)
-         : modelParameters_(ModelParameters(3))
+         : weights_(Weights(3))
       {
          LabelType numberOfLabels = 2;
          gt_.resize(64*64,0);
@@ -64,7 +64,7 @@ namespace opengm {
                   feat[1](1) = std::fabs(val1-1);
                   std::vector<size_t> wID(2);
                   wID[0]=1;  wID[1]=2;
-                  opengm::functions::learnable::SumOfExperts<ValueType,IndexType,LabelType> f(shape,modelParameters_, wID, feat);
+                  opengm::functions::learnable::SumOfExperts<ValueType,IndexType,LabelType> f(shape,weights_, wID, feat);
                   typename GM::FunctionIdentifier fid =  gms_[m].addFunction(f);
 
                   // factor
@@ -73,7 +73,7 @@ namespace opengm {
                }
             }
           
-            opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType> f(modelParameters_,2,std::vector<size_t>(1,0),std::vector<ValueType>(1,1));
+            opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType> f(weights_,2,std::vector<size_t>(1,0),std::vector<ValueType>(1,1));
             typename GM::FunctionIdentifier fid = gms_[m].addFunction(f);      
             for(size_t y = 0; y < 64; ++y){ 
                for(size_t x = 0; x < 64; ++x) {
diff --git a/include/opengm/learning/gridsearch-learning.hxx b/include/opengm/learning/gridsearch-learning.hxx
index 353814b..f6ac484 100644
--- a/include/opengm/learning/gridsearch-learning.hxx
+++ b/include/opengm/learning/gridsearch-learning.hxx
@@ -33,12 +33,12 @@ namespace opengm {
          //template<class INF, class VISITOR>
          //void learn(typename INF::Parameter para, VITITOR vis);
 
-         const opengm::Parameters<double,size_t>& getModelParameters(){return modelParameters_;} 
+         const opengm::learning::Weights<double>& getWeights(){return weights_;}
          Parameter& getLerningParameters(){return para_;}
 
       private:
          DATASET& dataset_;
-         opengm::Parameters<double,size_t> modelParameters_;
+         opengm::learning::Weights<double> weights_;
          Parameter para_;
       }; 
 
@@ -46,13 +46,13 @@ namespace opengm {
       GridSearchLearner<DATASET, LOSS>::GridSearchLearner(DATASET& ds, Parameter& p )
          : dataset_(ds), para_(p)
       {
-         modelParameters_ = opengm::Parameters<double,size_t>(ds.getNumberOfParameters());
-         if(para_.parameterUpperbound_.size() != ds.getNumberOfParameters())
-            para_.parameterUpperbound_.resize(ds.getNumberOfParameters(),10.0);  
-         if(para_.parameterLowerbound_.size() != ds.getNumberOfParameters())
-            para_.parameterLowerbound_.resize(ds.getNumberOfParameters(),0.0); 
-         if(para_.testingPoints_.size() != ds.getNumberOfParameters())
-            para_.testingPoints_.resize(ds.getNumberOfParameters(),10); 
+         weights_ = opengm::learning::Weights<double>(ds.getNumberOfWeights());
+         if(para_.parameterUpperbound_.size() != ds.getNumberOfWeights())
+            para_.parameterUpperbound_.resize(ds.getNumberOfWeights(),10.0);
+         if(para_.parameterLowerbound_.size() != ds.getNumberOfWeights())
+            para_.parameterLowerbound_.resize(ds.getNumberOfWeights(),0.0);
+         if(para_.testingPoints_.size() != ds.getNumberOfWeights())
+            para_.testingPoints_.resize(ds.getNumberOfWeights(),10);
       }
 
 
@@ -60,20 +60,20 @@ namespace opengm {
       template<class INF>
       void GridSearchLearner<DATASET, LOSS>::learn(typename INF::Parameter& para){
          // generate model Parameters
-         opengm::Parameters<double,size_t> modelPara( dataset_.getNumberOfParameters() );
-         opengm::Parameters<double,size_t> bestModelPara( dataset_.getNumberOfParameters() );
+         opengm::learning::Weights<double> modelPara( dataset_.getNumberOfWeights() );
+         opengm::learning::Weights<double> bestModelPara( dataset_.getNumberOfWeights() );
          double                            bestLoss = 100000000.0; 
-         std::vector<size_t> itC(dataset_.getNumberOfParameters(),0);
+         std::vector<size_t> itC(dataset_.getNumberOfWeights(),0);
 
          LOSS lossFunction;
          bool search=true;
          while(search){
             // Get Parameter
-            for(size_t p=0; p<dataset_.getNumberOfParameters(); ++p){
-               modelPara.setParameter(p, para_.parameterLowerbound_[p] + double(itC[p])/double(para_.testingPoints_[p]-1)*(para_.parameterUpperbound_[p]-para_.parameterLowerbound_[p]) );
+            for(size_t p=0; p<dataset_.getNumberOfWeights(); ++p){
+               modelPara.setWeight(p, para_.parameterLowerbound_[p] + double(itC[p])/double(para_.testingPoints_[p]-1)*(para_.parameterUpperbound_[p]-para_.parameterLowerbound_[p]) );
             }
             // Evaluate Loss
-            opengm::Parameters<double,size_t>& mp =  dataset_.getModelParameters();
+            opengm::learning::Weights<double>& mp =  dataset_.getWeights();
             mp = modelPara;
             std::vector< std::vector<typename INF::LabelType> > confs( dataset_.getNumberOfModels() );
             double loss = 0;
@@ -86,7 +86,7 @@ namespace opengm {
             }
             
             // *call visitor*
-            for(size_t p=0; p<dataset_.getNumberOfParameters(); ++p){
+            for(size_t p=0; p<dataset_.getNumberOfWeights(); ++p){
                std::cout << modelPara[p] <<" ";
             }
             std::cout << " ==> ";
@@ -98,26 +98,26 @@ namespace opengm {
                bestModelPara=modelPara;
             }
             //Increment Parameter
-            for(size_t p=0; p<dataset_.getNumberOfParameters(); ++p){
+            for(size_t p=0; p<dataset_.getNumberOfWeights(); ++p){
                if(itC[p]<para_.testingPoints_[p]-1){
                   ++itC[p];
                   break;
                }
                else{
                   itC[p]=0;
-                  if (p==dataset_.getNumberOfParameters()-1)
+                  if (p==dataset_.getNumberOfWeights()-1)
                      search = false; 
                }             
             }
 
          }
          std::cout << "Best"<<std::endl;
-         for(size_t p=0; p<dataset_.getNumberOfParameters(); ++p){
+         for(size_t p=0; p<dataset_.getNumberOfWeights(); ++p){
             std::cout << bestModelPara[p] <<" ";
          }
          std::cout << " ==> ";
          std::cout << bestLoss << std::endl;
-         modelParameters_ = bestModelPara;
+         weights_ = bestModelPara;
       };
    }
 }
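
The itC vector in the learner above acts as a mixed-radix counter over the testing points: increment the first index still below its bound, reset every index before it, and stop once the last index wraps. A standalone sketch of that enumeration (grid sizes are illustrative):

    #include <cstddef>
    #include <vector>

    int main() {
        std::vector<std::size_t> points(2, 3);  // 3 testing points per weight
        std::vector<std::size_t> itC(points.size(), 0);
        bool search = true;
        std::size_t visited = 0;
        while (search) {
            ++visited;  // the learner evaluates the loss at itC here
            for (std::size_t p = 0; p < itC.size(); ++p) {
                if (itC[p] < points[p] - 1) { ++itC[p]; break; }
                itC[p] = 0;
                if (p == itC.size() - 1) search = false;
            }
        }
        return visited == 9 ? 0 : 1;  // 3 * 3 grid points, each visited once
    }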
diff --git a/include/opengm/learning/struct-max-margin.hxx b/include/opengm/learning/struct-max-margin.hxx
index 9a15b4b..1ee2994 100644
--- a/include/opengm/learning/struct-max-margin.hxx
+++ b/include/opengm/learning/struct-max-margin.hxx
@@ -23,7 +23,7 @@ public:
 	typedef O  OptimizerType;
 
 	typedef typename DatasetType::ValueType       ValueType;
-	typedef typename DatasetType::ModelParameters ModelParameters;
+    typedef typename DatasetType::Weights         Weights;
 
 	struct Parameter {
 
@@ -46,7 +46,7 @@ public:
 	template <typename InferenceType>
 	void learn(typename InferenceType::Parameter& parameter);
 
-	const ModelParameters& getModelParameters() { return _learntParameters; }
+    const Weights& getWeights() { return _weights; }
 
 private:
 
@@ -62,7 +62,7 @@ private:
 			 * Evaluate the loss-augmented energy value of the dataset and its 
 			 * gradient at w.
 			 */
-			void operator()(const ModelParameters& w, double& value, ModelParameters& gradient) {
+            void operator()(const Weights& w, double& value, Weights& gradient) {
 
 				for (int i = 0; i < _dataset.getNumberOfModels(); i++) {
 
@@ -83,7 +83,7 @@ private:
 
 	OptimizerType _optimizer;
 
-	ModelParameters _learntParameters;
+    Weights _weights;
 };
 
 template <typename DS, typename LG, typename O>
@@ -100,7 +100,7 @@ StructMaxMargin<DS, LG, O>::learn(typename InfereneType::Parameter& infParams) {
 	Oracle<InfereneType> oracle(_dataset);
 
 	// minimize structured loss
-	OptimizerResult result = _optimizer.optimize(oracle, _learntParameters);
+    OptimizerResult result = _optimizer.optimize(oracle, _weights);
 
 	if (result == Error)
 		throw opengm::RuntimeError("optimizer did not succeed");
diff --git a/src/unittest/test_gm_learning_functions.cxx b/src/unittest/test_gm_learning_functions.cxx
index 4c99ca6..c78317e 100644
--- a/src/unittest/test_gm_learning_functions.cxx
+++ b/src/unittest/test_gm_learning_functions.cxx
@@ -70,10 +70,10 @@ struct GraphicalModelTest {
       GmType gmA(opengm::DiscreteSpace<I, L > (nos, nos + 3));
 
       // parameter
-      const size_t numparam = 1;
-      opengm::Parameters<T,I> param(numparam);
-      param.setParameter(0,5.0);
-      LPF lPotts(2,2,param,0);
+      const size_t numweights = 1;
+      opengm::learning::Weights<T> weights(numweights);
+      weights.setWeight(0,5.0);
+      LPF lPotts(2,2,weights,0);
 
 
       I labels00[2]={0,0};
@@ -87,7 +87,7 @@ struct GraphicalModelTest {
       OPENGM_ASSERT_OP(lPotts(labels10),<,5.01);
 
 
-      param.setParameter(0,3.0);
+      weights.setWeight(0,3.0);
 
       OPENGM_ASSERT_OP(lPotts(labels01),>,2.99);
       OPENGM_ASSERT_OP(lPotts(labels01),<,3.01);
diff --git a/src/unittest/test_gridsearch_learner.cxx b/src/unittest/test_gridsearch_learner.cxx
index 653a0e9..dd07c84 100644
--- a/src/unittest/test_gridsearch_learner.cxx
+++ b/src/unittest/test_gridsearch_learner.cxx
@@ -35,7 +35,7 @@ int main() {
   
    {
       DS dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfParameters() << " parameters."<<std::endl; 
+      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
       
       
       opengm::learning::GridSearchLearner<DS,LOSS>::Parameter para;
@@ -52,7 +52,7 @@ int main() {
   
    {
       DS2 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfParameters() << " parameters."<<std::endl; 
+      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
       
       
       opengm::learning::GridSearchLearner<DS2,LOSS>::Parameter para;
diff --git a/src/unittest/test_learnable_functions.cxx b/src/unittest/test_learnable_functions.cxx
index b2c63cd..030ad40 100644
--- a/src/unittest/test_learnable_functions.cxx
+++ b/src/unittest/test_learnable_functions.cxx
@@ -14,8 +14,8 @@ struct LearnableFunctionsTest {
     std::cout << " * Learnable Feature Function ..." << std::flush; 
     // parameter
     const size_t numparam = 1;
-    opengm::Parameters<ValueType,IndexType> param(numparam);
-    param.setParameter(0,5.0);
+    opengm::learning::Weights<ValueType> param(numparam);
+    param.setWeight(0,5.0);
     
     std::vector<LabelType> shape(2,3);
     std::vector<size_t> pIds(1,0);
@@ -36,8 +36,8 @@ struct LearnableFunctionsTest {
     std::cout << " * LearnablePotts ..." << std::flush; 
     // parameter
     const size_t numparam = 1;
-    opengm::Parameters<ValueType,IndexType> param(numparam);
-    param.setParameter(0,5.0);
+    opengm::learning::Weights<ValueType> param(numparam);
+    param.setWeight(0,5.0);
     
     LabelType numL = 3;
     std::vector<size_t> pIds(1,0);
@@ -65,7 +65,7 @@ struct LearnableFunctionsTest {
        opengm::FunctionSerialization<FUNCTION>::serialize(f,indices.begin(),values.begin());
        FUNCTION f2;
        opengm::FunctionSerialization<FUNCTION>::deserialize(indices.begin(),values.begin(),f2);
-       f2.setParameters(param);
+       f2.setWeights(param);
 
        OPENGM_TEST(f.dimension()==f2.dimension());
        OPENGM_TEST(f.size() == f2.size());
diff --git a/src/unittest/test_learning.cxx b/src/unittest/test_learning.cxx
index 503b75e..a8b9b78 100644
--- a/src/unittest/test_learning.cxx
+++ b/src/unittest/test_learning.cxx
@@ -15,7 +15,7 @@ struct LearningTest {
 			opengm::functions::learnable::LPotts<T>)                           FunctionTypeList;
 	typedef opengm::GraphicalModel<ValueType, opengm::Adder, FunctionTypeList> GraphicalModelType;
 	typedef opengm::datasets::TestDataset<GraphicalModelType>                  DatasetType;
-	typedef typename DatasetType::ModelParameters                              ModelParameters;
+    typedef typename DatasetType::Weights                              Weights;
 	typedef opengm::learning::HammingLoss                                      LossGeneratorType;
 	typedef opengm::Bruteforce<GraphicalModelType,opengm::Minimizer>           InferenceType;
 
@@ -32,9 +32,9 @@ struct LearningTest {
 		structMaxMargin.template learn<InferenceType>(infParams);
 
 		// get the result
-		const ModelParameters& learntParameters = structMaxMargin.getModelParameters(); 
-                std::cout << learntParameters.numberOfParameters()<<std::endl;
-                for(size_t i=0; i<learntParameters.numberOfParameters();++i)
+        const Weights& learntParameters = structMaxMargin.getWeights();
+                std::cout << learntParameters.numberOfWeights()<<std::endl;
+                for(size_t i=0; i<learntParameters.numberOfWeights();++i)
                    std::cout << learntParameters[i] << " ";
                 std::cout << std::endl;
 	}

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git


