[opengm] 328/386: removed learning

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:38:23 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit 53b435c43547536e337c5e80103156fdfe7163da
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date:   Fri Mar 25 12:39:20 2016 +0100

    removed learning
---
 include/opengm/learning/bundle-optimizer.hxx       | 326 ---------------
 include/opengm/learning/dataset/dataset.hxx        | 234 -----------
 include/opengm/learning/dataset/dataset_io.hxx     | 134 -------
 .../opengm/learning/dataset/editabledataset.hxx    | 146 -------
 include/opengm/learning/dataset/testdatasets.hxx   | 375 ------------------
 include/opengm/learning/gradient-accumulator.hxx   | 175 --------
 include/opengm/learning/gridsearch-learning.hxx    | 126 ------
 include/opengm/learning/loss/flexibleloss.hxx      | 305 --------------
 .../learning/loss/generalized-hammingloss.hxx      | 152 -------
 include/opengm/learning/loss/hammingloss.hxx       |  81 ----
 include/opengm/learning/loss/noloss.hxx            |  73 ----
 .../learning/maximum-likelihood-learning.hxx       | 310 ---------------
 .../learning/maximum_likelihood_learning.hxx       | 238 -----------
 include/opengm/learning/rws.hxx                    | 286 --------------
 include/opengm/learning/solver/BundleCollector.h   |  49 ---
 include/opengm/learning/solver/CplexBackend.h      | 433 --------------------
 include/opengm/learning/solver/GurobiBackend.h     | 439 ---------------------
 include/opengm/learning/solver/LinearConstraint.h  |  94 -----
 include/opengm/learning/solver/LinearConstraints.h | 119 ------
 include/opengm/learning/solver/LinearObjective.h   |  24 --
 .../opengm/learning/solver/LinearSolverBackend.h   |  84 ----
 .../opengm/learning/solver/QuadraticObjective.h    | 181 ---------
 .../learning/solver/QuadraticSolverBackend.h       |  28 --
 .../learning/solver/QuadraticSolverFactory.h       |  33 --
 .../learning/solver/QuadraticSolverParameters.h    |  15 -
 include/opengm/learning/solver/Relation.h          |  20 -
 include/opengm/learning/solver/Sense.h             |  20 -
 include/opengm/learning/solver/Solution.h          |  49 ---
 include/opengm/learning/solver/VariableType.h      |  18 -
 include/opengm/learning/struct-max-margin.hxx      | 219 ----------
 include/opengm/learning/structured_perceptron.hxx  | 208 ----------
 include/opengm/learning/subgradient_ssvm.hxx       | 353 -----------------
 include/opengm/learning/weight_averaging.hxx       |  68 ----
 src/unittest/CMakeLists.txt                        |   1 -
 src/unittest/learning/CMakeLists.txt               |  51 ---
 src/unittest/learning/test_dataset.cxx             | 150 -------
 src/unittest/learning/test_dataset_io.cxx          | 101 -----
 .../learning/test_generalized_hammingloss.cxx      |  65 ---
 src/unittest/learning/test_gridsearch_learner.cxx  |  90 -----
 src/unittest/learning/test_learning.cxx            | 233 -----------
 .../learning/test_maximum_likelihood_learner.cxx   | 126 ------
 src/unittest/learning/test_subgradient_ssvm.cxx    | 238 -----------
 42 files changed, 6470 deletions(-)

diff --git a/include/opengm/learning/bundle-optimizer.hxx b/include/opengm/learning/bundle-optimizer.hxx
deleted file mode 100644
index e04dd48..0000000
--- a/include/opengm/learning/bundle-optimizer.hxx
+++ /dev/null
@@ -1,326 +0,0 @@
-#pragma once
-#ifndef OPENGM_LEARNING_BUNDLE_OPTIMIZER_HXX
-#define OPENGM_LEARNING_BUNDLE_OPTIMIZER_HXX
-
-#include "solver/BundleCollector.h"
-#include "solver/QuadraticSolverFactory.h"
-
-namespace opengm {
-
-namespace learning {
-
-//template <typename T>
-//std::ostream& operator<<(std::ostream& out, Weights<T>& w) {
-
-//    out << "[";
-//    for (int i = 0; i < w.numberOfWeights(); i++) {
-
-//        if (i > 0)
-//            out << ", ";
-//        out << w[i];
-//    }
-//    out << "]";
-//}
-
-enum OptimizerResult {
-
-	// the minimal optimization gap was reached
-	ReachedMinGap,
-
-	// the requested number of steps was exceeded
-	ReachedSteps,
-
-	// something went wrong
-	Error
-};
-
-template <typename ValueType>
-class BundleOptimizer {
-
-public:
-
-	enum EpsStrategy {
-
-		/**
-		 * Compute the eps from the gap estimate between the lower bound and the 
-		 * target objective. The gap estimate will only be correct for oracle 
-		 * calls that perform exact inference.
-		 */
-		EpsFromGap,
-
-		/**
-		 * Compute the eps from the change of the minimum of the lower bound.  
-		 * This version does also work for approximate (but deterministic) 
-		 * inference methods.
-		 */
-		EpsFromChange
-	};
-
-	struct Parameter {
-
-		Parameter() :
-			lambda(1.0),
-			min_eps(1e-5),
-			steps(0),
-			epsStrategy(EpsFromChange),
-			verbose_(false) {}
-
-		// regularizer weight
-		double lambda;
-
-		// the maximal number of steps to perform, 0 = no limit
-		unsigned int steps;
-
-		// bundle method stops if eps is smaller than this value
-		ValueType min_eps;
-
-		// how to compute the eps for the stopping criterion
-		EpsStrategy epsStrategy;
-		// print progress information in each iteration
-		bool verbose_;
-	};
-
-	BundleOptimizer(const Parameter& parameter = Parameter());
-
-	~BundleOptimizer();
-
-	/**
-	 * Start the bundle method optimization on the given oracle. The oracle has
-	 * to model:
-	 *
-	 *   Weights current;
-	 *   Weights gradient;
-	 *   double  value;
-	 *
-	 *   oracle(current, value, gradient);
-	 *
-	 * i.e., it computes the value and gradient of the objective function at
-	 * point 'current' and writes them into 'value' and 'gradient' (both
-	 * passed by reference).
-	 */
-    template <typename Oracle, typename Weights>
-    OptimizerResult optimize(Oracle& oracle, Weights& w);
-
-private:
-
-    template <typename Weights>
-    void setupQp(const Weights& w);
-
-	template <typename ModelWeights>
-	void findMinLowerBound(ModelWeights& w, ValueType& value);
-
-	template <typename ModelWeights>
-	ValueType dot(const ModelWeights& a, const ModelWeights& b);
-
-	Parameter _parameter;
-
-	solver::BundleCollector _bundleCollector;
-
-	solver::QuadraticSolverBackend* _solver;
-};
-
-template <typename T>
-BundleOptimizer<T>::BundleOptimizer(const Parameter& parameter) :
-	_parameter(parameter),
-	_solver(0) {}
-
-template <typename T>
-BundleOptimizer<T>::~BundleOptimizer() {
-
-	if (_solver)
-		delete _solver;
-}
-
-template <typename T>
-template <typename Oracle, typename Weights>
-OptimizerResult
-BundleOptimizer<T>::optimize(Oracle& oracle, Weights& w) {
-
-	setupQp(w);
-
-	/*
-	  1. w_0 = 0, t = 0
-	  2. t++
-	  3. compute a_t = ∂L(w_t-1)/∂w
-	  4. compute b_t =  L(w_t-1) - <w_t-1,a_t>
-	  5. ℒ_t(w) = max_i <w,a_i> + b_i
-	  6. w_t = argmin λ½|w|² + ℒ_t(w)
-	  7. ε_t = min_i [ λ½|w_i|² + L(w_i) ] - [ λ½|w_t|² + ℒ_t(w_t) ]
-			   ^^^^^^^^^^^^^^^^^^^^^^^^^^^   ^^^^^^^^^^^^^^^^^^^^^^^
-				 smallest L(w) ever seen    current min of lower bound
-	  8. if ε_t > ε, goto 2
-	  9. return w_t
-	*/
-
-	T minValue     =  std::numeric_limits<T>::infinity();
-	T lastMinLower = -std::numeric_limits<T>::infinity();
-
-	unsigned int t = 0;
-
-    while (true) {
-
-		t++;
-
-		// stop if the requested number of steps is exceeded (0 = no limit)
-		if (_parameter.steps > 0 && t > _parameter.steps)
-			return ReachedSteps;
-
-        if(_parameter.verbose_)
-            std::cout << std::endl << "----------------- iteration      " << t << std::endl;
-
-        Weights w_tm1 = w;
-
-        if(_parameter.verbose_){
-            std::cout << "w: ";
-            for(size_t i=0; i<w_tm1.size(); ++i)
-                std::cout << w_tm1[i] << " ";
-            std::cout << std::endl;
-        }
-
-		// value of L at current w
-		T L_w_tm1 = 0.0;
-
-		// gradient of L at current w
-        Weights a_t(w.numberOfWeights());
-
-		// get current value and gradient
-		oracle(w_tm1, L_w_tm1, a_t);
-
-        if(_parameter.verbose_){
-            std::cout << "       L(w)              is: " << L_w_tm1 << std::endl;
-            std::cout << "∂L(w)/∂w: (";
-            for(size_t i=0; i<a_t.size(); ++i)
-                std::cout << a_t[i] << " ";
-            std::cout << ")" << std::endl;
-        }
-
-		// update smallest observed value of regularized L
-		minValue = std::min(minValue, L_w_tm1 + _parameter.lambda*0.5*dot(w_tm1, w_tm1));
-
-        if(_parameter.verbose_)
-            std::cout << " min_i L(w_i) + ½λ|w_i|² is: " << minValue << std::endl;
-
-		// compute hyperplane offset
-		T b_t = L_w_tm1 - dot(w_tm1, a_t);
-
-        if(_parameter.verbose_){
-            std::cout << "adding hyperplane: ( ";
-            for(size_t i=0; i<a_t.size(); ++i)
-                std::cout << a_t[i] << " ";
-            std::cout << ")*w + " << b_t << std::endl;
-        }
-
-		// update lower bound
-		_bundleCollector.addHyperplane(a_t, b_t);
-
-		// minimal value of lower bound
-		T minLower;
-
-        // update w and get minimal value
-		findMinLowerBound(w, minLower);
-
-        // norm of w
-        double norm = 0.0;
-        for(size_t i=0; i<w.size(); ++i)
-            norm += w[i]*w[i];
-        norm = std::sqrt(norm);
-
-        if(_parameter.verbose_){
-            std::cout << " min_w ℒ(w)   + ½λ|w|²   is: " << minLower << std::endl;
-            std::cout << " w* of ℒ(w)   + ½λ|w|²   is: (";
-            for(size_t i=0; i<w.size(); ++i)
-                std::cout << w[i] << " ";
-            std::cout << ")              normalized: (";
-            for(size_t i=0; i<w.size(); ++i)
-                std::cout << w[i]/norm << " ";
-            std::cout << ")" << std::endl;
-        }
-
-		// compute gap
-		T eps_t;
-		if (_parameter.epsStrategy == EpsFromGap)
-			eps_t = minValue - minLower;
-		else
-			eps_t = minLower - lastMinLower;
-
-		lastMinLower = minLower;
-
-        if(_parameter.verbose_)
-            std::cout  << "          ε   is: " << eps_t << std::endl;
-
-		// converged?
-		if (eps_t <= _parameter.min_eps)
-			break;
-	}
-
-	return ReachedMinGap;
-}
-
-template <typename T>
-template <typename Weights>
-void
-BundleOptimizer<T>::setupQp(const Weights& w) {
-
-	/*
-	  w* = argmin λ½|w|² + ξ, s.t. <w,a_i> + b_i ≤ ξ ∀i
-	*/
-
-	if (!_solver)
-		_solver = solver::QuadraticSolverFactory::Create();
-
-	_solver->initialize(w.numberOfWeights() + 1, solver::Continuous);
-
-	// one variable for each component of w and for ξ
-    solver::QuadraticObjective obj(w.numberOfWeights() + 1);
-
-	// regularizer
-    for (unsigned int i = 0; i < w.numberOfWeights(); i++)
-		obj.setQuadraticCoefficient(i, i, 0.5*_parameter.lambda);
-
-	// ξ
-    obj.setCoefficient(w.numberOfWeights(), 1.0);
-
-	// we minimize
-	obj.setSense(solver::Minimize);
-
-	// we are done with the objective -- this does not change anymore
-	_solver->setObjective(obj);
-}
-
-template <typename T>
-template <typename ModelWeights>
-void
-BundleOptimizer<T>::findMinLowerBound(ModelWeights& w, T& value) {
-
-	_solver->setConstraints(_bundleCollector.getConstraints());
-
-	solver::Solution x;
-	std::string msg;
-	bool optimal = _solver->solve(x, value, msg);
-
-	if (!optimal) {
-
-		std::cerr
-				<< "[BundleOptimizer] QP could not be solved to optimality: "
-				<< msg << std::endl;
-
-		return;
-	}
-
-	for (size_t i = 0; i < w.numberOfWeights(); i++)
-		w[i] = x[i];
-}
-
-template <typename T>
-template <typename ModelWeights>
-T
-BundleOptimizer<T>::dot(const ModelWeights& a, const ModelWeights& b) {
-
-	OPENGM_ASSERT(a.numberOfWeights() == b.numberOfWeights());
-
-	T d = 0.0;
-	for (size_t i = 0; i < a.numberOfWeights(); i++)
-		d += a[i]*b[i];
-
-	return d;
-}
-
-} // learning
-
-} // opengm
-
-#endif // OPENGM_LEARNING_BUNDLE_OPTIMIZER_HXX
-
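For reference, a minimal sketch of the oracle contract that optimize() documents above. The QuadraticOracle below is invented for illustration (the real oracles in this tree wrap structured losses such as struct-max-margin); it only assumes a Weights type exposing numberOfWeights() and operator[]:

    // Toy oracle for L(w) = sum_i (w_i - 1)^2 -- illustration only.
    struct QuadraticOracle {
        template <typename Weights>
        void operator()(const Weights& w, double& value, Weights& gradient) {
            value = 0.0;
            for (size_t i = 0; i < w.numberOfWeights(); ++i) {
                const double d = w[i] - 1.0;
                value += d * d;        // L(w)
                gradient[i] = 2.0 * d; // dL/dw_i
            }
        }
    };

    // usage sketch (weight count is a placeholder):
    //   opengm::learning::BundleOptimizer<double>::Parameter p;
    //   p.min_eps = 1e-5;
    //   opengm::learning::BundleOptimizer<double> opt(p);
    //   opengm::learning::Weights<double> w(numberOfWeights);
    //   QuadraticOracle oracle;
    //   opt.optimize(oracle, w);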
diff --git a/include/opengm/learning/dataset/dataset.hxx b/include/opengm/learning/dataset/dataset.hxx
deleted file mode 100644
index e315100..0000000
--- a/include/opengm/learning/dataset/dataset.hxx
+++ /dev/null
@@ -1,234 +0,0 @@
-#pragma once
-#ifndef OPENGM_DATASET_HXX
-#define OPENGM_DATASET_HXX
-
-#include <vector>
-#include <cstdlib>
-
-#include "../../graphicalmodel/weights.hxx"
-#include "../../functions/unary_loss_function.hxx"
-#include "../loss/noloss.hxx"
-
-namespace opengm {
-   namespace datasets{
-     
-    template<class GM>
-    struct DefaultLossGm{
-
-        // make the graphical model with loss
-        typedef typename GM::SpaceType         SpaceType;
-        typedef typename GM::ValueType         ValueType;
-        typedef typename GM::IndexType         IndexType;
-        typedef typename GM::LabelType         LabelType;
-        typedef typename GM::OperatorType      OperatorType;
-        typedef typename GM::FunctionTypeList  OrgFunctionTypeList;
-
-        // extend the typelist
-        typedef typename opengm::meta::TypeListGenerator<
-            opengm::ExplicitFunction<ValueType,IndexType,LabelType>, 
-            opengm::UnaryLossFunction<ValueType,IndexType,LabelType>
-        >::type LossOnlyFunctionTypeList;
-
-        typedef typename opengm::meta::MergeTypeListsNoDuplicates<
-            OrgFunctionTypeList,LossOnlyFunctionTypeList
-        >::type CombinedList;
-        // loss graphical model type
-
-        typedef GraphicalModel<ValueType, OperatorType, CombinedList, SpaceType> type;
-    };
-
-    template<class GM, class LOSS=opengm::learning::NoLoss, class LOSS_GM = DefaultLossGm<GM> >
-    class Dataset{
-    public:
-        typedef GM                       GMType;
-
-        // generate the gm with loss here atm (THIS IS WRONG)
-        typedef typename opengm::meta::EvalIf<
-        opengm::meta::Compare<LOSS_GM, DefaultLossGm<GM> >::value,
-        DefaultLossGm<GM>,
-        meta::Self<LOSS_GM>
-        >::type GMWITHLOSS;
-
-        //typedef GM                       GMWITHLOSS;
-        typedef LOSS                     LossType;
-        typedef typename LOSS::Parameter LossParameterType;
-        typedef typename GM::ValueType   ValueType;
-        typedef typename GM::IndexType   IndexType;
-        typedef typename GM::LabelType   LabelType;
-
-
-        typedef opengm::learning::Weights<ValueType> Weights;
-        typedef opengm::learning::WeightConstraints<ValueType> WeightConstraintsType;
-
-
-        void                          lockModel(const size_t i)               { ++count_[i]; }
-        void                          unlockModel(const size_t i)             { OPENGM_ASSERT(count_[i]>0); --count_[i]; }
-        const GM&                     getModel(const size_t i) const          { return gms_[i]; } 
-        const GMWITHLOSS&             getModelWithLoss(const size_t i)const   { return gmsWithLoss_[i]; }
-        const LossParameterType&      getLossParameters(const size_t i)const  { return lossParams_[i]; }
-        const std::vector<LabelType>& getGT(const size_t i) const             { return gts_[i]; }
-        Weights&                      getWeights()                            { return weights_; } 
-        size_t                        getNumberOfWeights() const              { return weights_.numberOfWeights(); }
-        size_t                        getNumberOfModels() const               { return gms_.size(); } 
-
-        template<class INF>
-        ValueType                     getTotalLoss(const typename INF::Parameter& para) const;
-
-        template<class INF>
-        ValueType                     getTotalLossParallel(const typename INF::Parameter& para) const;
-
-        template<class INF>
-        ValueType                     getLoss(const typename INF::Parameter& para, const size_t i) const;
-        ValueType                     getLoss(const std::vector<LabelType>& conf, const size_t i) const;
-
-        Dataset(size_t numInstances);
-
-        Dataset(const Weights & weights = Weights(),const WeightConstraintsType & weightConstraints = WeightConstraintsType(),size_t numInstances=0);
-
-        //void loadAll(std::string path,std::string prefix); 
-
-        friend class DatasetSerialization;
-        // friend void loadAll<Dataset<GM,LOSS> > (const std::string datasetpath, const std::string prefix, Dataset<GM,LOSS>& ds);
-
-        //~Dataset(){
-        //    std::cout<<"KILL DATASET\n";
-        //}
-    protected:	
-        std::vector<size_t> count_;
-        std::vector<bool> isCached_;
-        std::vector<GM> gms_; 
-        std::vector<GMWITHLOSS> gmsWithLoss_; 
-        std::vector<LossParameterType> lossParams_;
-        std::vector<std::vector<LabelType> > gts_;
-        Weights weights_;
-        WeightConstraintsType weightConstraints_;
-
-
-        void buildModelWithLoss(size_t i);
-    };
-      
-
-    template<class GM, class LOSS, class LOSS_GM>
-    Dataset<GM, LOSS, LOSS_GM>::Dataset(size_t numInstances)
-    : count_(std::vector<size_t>(numInstances)),
-        isCached_(std::vector<bool>(numInstances)),
-        gms_(std::vector<GM>(numInstances)),
-        gmsWithLoss_(std::vector<GMWITHLOSS>(numInstances)),
-        lossParams_(std::vector<LossParameterType>(numInstances)),
-        gts_(std::vector<std::vector<LabelType> >(numInstances)),
-        weights_(0),
-        weightConstraints_()
-    {
-    }
-
-    template<class GM, class LOSS, class LOSS_GM>
-    Dataset<GM, LOSS, LOSS_GM>::Dataset(
-        const Weights & weights, 
-        const WeightConstraintsType & weightConstraints,
-        size_t numInstances
-    ):  count_(std::vector<size_t>(numInstances)),
-        isCached_(std::vector<bool>(numInstances)),
-        gms_(std::vector<GM>(numInstances)),
-        gmsWithLoss_(std::vector<GMWITHLOSS>(numInstances)),
-        lossParams_(std::vector<LossParameterType>(numInstances)),
-        gts_(std::vector<std::vector<LabelType> >(numInstances)),
-        weights_(weights),
-        weightConstraints_(weightConstraints)
-    {
-    }
-
-
-    template<class GM, class LOSS, class LOSS_GM>
-    template<class INF>
-    typename GM::ValueType Dataset<GM, LOSS, LOSS_GM>::getTotalLoss(const typename INF::Parameter& para) const {
-        ValueType sum=0;
-        for(size_t i=0; i<this->getNumberOfModels(); ++i) {
-            sum += this->getLoss<INF>(para, i);
-        }
-        return sum;
-    }
-    template<class GM, class LOSS, class LOSS_GM>
-    template<class INF>
-    typename GM::ValueType Dataset<GM, LOSS, LOSS_GM>::getTotalLossParallel(const typename INF::Parameter& para) const {
-        double totalLoss = 0;
-        #pragma omp parallel for reduction(+:totalLoss)  
-        for(size_t i=0; i<this->getNumberOfModels(); ++i) {
-            totalLoss = totalLoss + this->getLoss<INF>(para, i);
-        }
-        return totalLoss;
-    }
-
-    template<class GM, class LOSS, class LOSS_GM>
-    template<class INF>
-    typename GM::ValueType Dataset<GM, LOSS, LOSS_GM>::getLoss(const typename INF::Parameter& para, const size_t i) const {
-        LOSS lossFunction(lossParams_[i]);
-        const GM& gm = this->getModel(i);
-        const std::vector<typename INF::LabelType>& gt =  this->getGT(i);
-
-        std::vector<typename INF::LabelType> conf;
-        INF inf(gm,para);
-        inf.infer();
-        inf.arg(conf);
-
-        return lossFunction.loss(gm, conf.begin(), conf.end(), gt.begin(), gt.end());
-
-    }
-
-    template<class GM, class LOSS, class LOSS_GM>
-    typename GM::ValueType Dataset<GM, LOSS, LOSS_GM>::getLoss(const std::vector<typename GM::LabelType>& conf, const size_t i) const {
-        LOSS lossFunction(lossParams_[i]);
-        const GM& gm = this->getModel(i);
-        const std::vector<LabelType>& gt =  this->getGT(i);
-        return lossFunction.loss(gm, conf.begin(), conf.end(), gt.begin(), gt.end());
-    }
-
-
-
-
-    template<class GM, class LOSS, class LOSS_GM>
-    void Dataset<GM, LOSS, LOSS_GM>::buildModelWithLoss(size_t i){
-        OPENGM_ASSERT_OP(i, <, lossParams_.size());
-        OPENGM_ASSERT_OP(i, <, gmsWithLoss_.size());
-        OPENGM_ASSERT_OP(i, <, gms_.size());
-        OPENGM_ASSERT_OP(i, <, gts_.size());
-        //std::cout<<"copy gm\n";
-        gmsWithLoss_[i] = gms_[i];    
-        //std::cout<<"copy done\n";
-        LOSS loss(lossParams_[i]);         
-        OPENGM_CHECK_OP(gts_[i].size(),==, gmsWithLoss_[i].numberOfVariables(),"");
-        loss.addLoss(gmsWithLoss_[i], gts_[i].begin());
-    }
-
-    // template<class GM, class LOSS, class LOSS_GM>
-    // void Dataset<GM, LOSS, LOSS_GM>::loadAll(std::string datasetpath,std::string prefix){
-    //     //Load Header 
-    //     std::stringstream hss;
-    //     hss << datasetpath << "/"<<prefix<<"info.h5";
-    //     hid_t file =  marray::hdf5::openFile(hss.str());
-    //     std::vector<size_t> temp(1);
-    //     marray::hdf5::loadVec(file, "numberOfWeights", temp);
-    //     size_t numWeights = temp[0];
-    //     marray::hdf5::loadVec(file, "numberOfModels", temp);
-    //     size_t numModel = temp[0];
-    //     marray::hdf5::closeFile(file);
-
-    //     gms_.resize(numModel); 
-    //     gmsWithLoss_.resize(numModel);
-    //     gt_.resize(numModel);
-    //     weights_ = Weights(numWeights);
-    //     //Load Models and ground truth
-    //     for(size_t m=0; m<numModel; ++m){
-    //         std::stringstream ss;
-    //         ss  << datasetpath <<"/"<<prefix<<"gm_" << m <<".h5"; 
-    //         hid_t file =  marray::hdf5::openFile(ss.str()); 
-    //         marray::hdf5::loadVec(file, "gt", gt_[m]);
-    //         marray::hdf5::closeFile(file);
-    //         opengm::hdf5::load(gms_[m],ss.str(),"gm"); 
-    //         buildModelWithLoss(m);
-    //     }
-    // };
-
-}
-} // namespace opengm
-
-#endif 
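As a usage sketch (not from this commit): scoring a populated dataset, assuming ds is a Dataset instance that has already been filled (for example restored via the DatasetSerialization machinery in the next file). ICM stands in for any inference type INF exposing a Parameter type, infer() and arg():

    #include <opengm/inference/icm.hxx>

    typedef opengm::ICM<GM, opengm::Minimizer> Inf;  // GM as in Dataset<GM, ...>
    Inf::Parameter infParam;
    const double seqLoss = ds.getTotalLoss<Inf>(infParam);         // one model at a time
    const double parLoss = ds.getTotalLossParallel<Inf>(infParam); // OpenMP reduction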
diff --git a/include/opengm/learning/dataset/dataset_io.hxx b/include/opengm/learning/dataset/dataset_io.hxx
deleted file mode 100644
index e526c09..0000000
--- a/include/opengm/learning/dataset/dataset_io.hxx
+++ /dev/null
@@ -1,134 +0,0 @@
-#pragma once
-#ifndef OPENGM_DATASET_IO_HXX
-#define OPENGM_DATASET_IO_HXX
-
-#include <vector>
-#include <cstdlib>
-#include <opengm/graphicalmodel/graphicalmodel_hdf5.hxx>
-#include <opengm/opengm.hxx>
-#include "opengm/learning/loss/generalized-hammingloss.hxx"
-#include "opengm/learning/loss/hammingloss.hxx"
-#include "opengm/learning/loss/noloss.hxx"
-//#include <H5Cpp.h>
-
-namespace opengm{
-   namespace datasets{
-
-      template <class W>
-      struct WeightSetter {
-         public:
-           WeightSetter(W& w) : weights_(w) {}
-
-           template<class F>
-           void operator()(F& f) const { f.setWeights(weights_); }
-
-         private:
-           W& weights_;
-      };
-
-      class DatasetSerialization{
-      public:
-         template<class DATASET>
-         static void save(const DATASET& dataset, const std::string datasetpath, const std::string prefix=""); 
-         template<class DATASET>
-         static void loadAll(const std::string datasetpath, const std::string prefix,  DATASET& dataset);  
-      };
-
-      template<class DATASET>
-      void DatasetSerialization::save(const DATASET& dataset, const std::string datasetpath, const std::string prefix) {
-         typedef typename DATASET::GMType   GMType;
-         typedef typename DATASET::LossParameterType LossParameterType;
-         typedef typename GMType::LabelType LabelType; 
-         typedef typename GMType::ValueType ValueType;
-
-         std::vector<size_t> numWeights(1,dataset.getNumberOfWeights());
-         std::vector<size_t> numModels(1,dataset.getNumberOfModels());
-  
-         std::stringstream hss;
-         hss << datasetpath << "/"<<prefix<<"info.h5";
-         hid_t file = marray::hdf5::createFile(hss.str(), marray::hdf5::DEFAULT_HDF5_VERSION);
-         marray::hdf5::save(file,"numberOfWeights",numWeights);
-         marray::hdf5::save(file,"numberOfModels",numModels);
-         marray::hdf5::closeFile(file); 
-
-         for(size_t m=0; m<dataset.getNumberOfModels(); ++m){
-            const GMType&                 gm = dataset.getModel(m); 
-            const std::vector<LabelType>& gt = dataset.getGT(m);
-            const LossParameterType&      lossParam = dataset.getLossParameters(m);
-            std::stringstream ss;
-            ss  << datasetpath <<"/"<<prefix<<"gm_" << m <<".h5"; 
-            opengm::hdf5::save(gm, ss.str(), "gm");
-            hid_t file = marray::hdf5::openFile(ss.str(), marray::hdf5::READ_WRITE);
-
-            //marray::Vector<LabelType> mgt(gt.size());
-            //std::copy(gt.begin(), gt.end(), mgt.begin());
-
-            marray::hdf5::save(file,"gt",gt);
-            hid_t lossGrp = marray::hdf5::createGroup(file,"loss");
-
-            lossParam.save(lossGrp);
-            marray::hdf5::closeGroup(lossGrp);
-            marray::hdf5::closeFile(file);
-         }
-
-      }
-
-      template<class DATASET>
-      void DatasetSerialization::loadAll(const std::string datasetpath, const std::string prefix, DATASET& dataset) {  
-         typedef typename DATASET::GMType   GMType;
-         typedef typename GMType::LabelType LabelType; 
-         typedef typename GMType::ValueType ValueType;
-         typedef typename DATASET::LossParameterType LossParameterType;
-         
-         //Load Header 
-         std::stringstream hss;
-         hss << datasetpath << "/"<<prefix<<"info.h5";
-         hid_t file =  marray::hdf5::openFile(hss.str());
-         std::vector<size_t> temp(1);
-         marray::hdf5::loadVec(file, "numberOfWeights", temp);
-         size_t numWeights = temp[0];
-         marray::hdf5::loadVec(file, "numberOfModels", temp);
-         size_t numModel = temp[0];
-         marray::hdf5::closeFile(file);
-         
-         dataset.gms_.resize(numModel); 
-         dataset.gmsWithLoss_.resize(numModel);
-         dataset.gts_.resize(numModel);
-         dataset.lossParams_.resize(numModel);
-         dataset.count_.resize(numModel);
-         dataset.isCached_.resize(numModel);
-         dataset.weights_ = opengm::learning::Weights<ValueType>(numWeights);
-         OPENGM_ASSERT_OP(dataset.lossParams_.size(), ==, numModel);
-         WeightSetter<opengm::learning::Weights<ValueType> > wSetter(dataset.weights_);
-
-         //Load Models and ground truth
-         for(size_t m=0; m<numModel; ++m){
-            std::stringstream ss;
-            ss  << datasetpath <<"/"<<prefix<<"gm_" << m <<".h5"; 
-            hid_t file =  marray::hdf5::openFile(ss.str()); 
-            marray::hdf5::loadVec(file, "gt", dataset.gts_[m]);
-            opengm::hdf5::load(dataset.gms_[m],ss.str(),"gm");
-
-            for(size_t fi = 0; fi < dataset.gms_[m].numberOfFactors(); ++fi) {
-                dataset.gms_[m][fi].callFunctor(wSetter);
-            }
-
-            LossParameterType lossParam;
-            hid_t lossGrp = marray::hdf5::openGroup(file, "loss");
-            lossParam.load(lossGrp);
-            std::vector<std::size_t> lossId;
-            marray::hdf5::loadVec(lossGrp, "lossId", lossId);
-            OPENGM_CHECK_OP(lossId.size(), ==, 1, "");
-            OPENGM_CHECK_OP(lossParam.getLossId(), ==, lossId[0],"the dataset needs to be initialized with the same loss type as saved");
-            dataset.lossParams_[m] = lossParam;
-
-            OPENGM_CHECK_OP(dataset.gts_[m].size(), == ,dataset.gms_[m].numberOfVariables(), "");
-            marray::hdf5::closeFile(file);            
-            dataset.buildModelWithLoss(m);
-         }
-      }
-
-   }
-}
-
-#endif
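The on-disk layout produced by save() and consumed by loadAll() is: <datasetpath>/<prefix>info.h5 holding "numberOfWeights" and "numberOfModels", plus one <prefix>gm_<m>.h5 per model holding the model ("gm"), its ground truth ("gt") and a "loss" parameter group. A round-trip sketch, with illustrative paths and an assumed dataset typedef MyDataset:

    MyDataset ds;   // assumed populated; must use the same LOSS type on both sides
    opengm::datasets::DatasetSerialization::save(ds, "/tmp/opengm-ds", "train_");

    MyDataset restored;
    opengm::datasets::DatasetSerialization::loadAll("/tmp/opengm-ds", "train_", restored);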
diff --git a/include/opengm/learning/dataset/editabledataset.hxx b/include/opengm/learning/dataset/editabledataset.hxx
deleted file mode 100644
index 1c940b9..0000000
--- a/include/opengm/learning/dataset/editabledataset.hxx
+++ /dev/null
@@ -1,146 +0,0 @@
-#pragma once
-#ifndef OPENGM_EDITABLEDATASET_HXX
-#define OPENGM_EDITABLEDATASET_HXX
-
-#include <vector>
-#include <cstdlib>
-
-#include <opengm/learning/dataset/dataset.hxx>
-#include "../../graphicalmodel/weights.hxx"
-#include "../loss/noloss.hxx"
-
-namespace opengm {
-namespace datasets{
-
-    // template< typename Weights >
-    // struct LinkWeights{
-
-    //     Weights& w_;
-    //     LinkWeights(const Weights& w):w_(w){}
-
-    //     template<class FUNCTION>
-    //     void operator()(const FUNCTION & function)
-    //     {
-    //         function.setWeights(w_);
-    //     }
-    // };
-
-    template<class GM, class LOSS, class LOSS_GM = DefaultLossGm<GM> >
-    class EditableDataset : public Dataset<GM, LOSS, LOSS_GM>{
-    public:
-        typedef GM                     GMType;
-        typedef typename Dataset<GM, LOSS, LOSS_GM>::GMWITHLOSS   GMWITHLOSS;
-        typedef LOSS                   LossType;
-        typedef typename LOSS::Parameter LossParameterType;
-        typedef typename GM::ValueType ValueType;
-        typedef typename GM::IndexType IndexType;
-        typedef typename GM::LabelType LabelType;
-
-        typedef opengm::learning::Weights<ValueType> Weights;
-        typedef opengm::learning::WeightConstraints<ValueType> WeightConstraintsType;
-
-        typedef std::vector<LabelType> GTVector;
-
-        EditableDataset(size_t numInstances) : Dataset<GM, LOSS,LOSS_GM>(numInstances) {}
-        EditableDataset(std::vector<GM>& gms, std::vector<GTVector >& gts, std::vector<LossParameterType>& lossParams);
-
-        EditableDataset(const Weights & weights = Weights(),const WeightConstraintsType & weightConstraints = WeightConstraintsType(),size_t numInstances=0)
-        :   Dataset<GM, LOSS, LOSS_GM>(weights, weightConstraints, numInstances){
-
-        }
-
-
-        void setInstance(const size_t i, const GM& gm, const GTVector& gt, const LossParameterType& p=LossParameterType());
-        void setGT(const size_t i, const GTVector& gt);
-        void pushBackInstance(const GM& gm, const GTVector& gt, const LossParameterType& p=LossParameterType());
-        void setWeights(Weights& w);
-
-
-        void setWeightConstraints(const WeightConstraintsType & weightConstraints);
-
-    };
-
-    template<class GM, class LOSS, class LOSS_GM>
-    EditableDataset<GM, LOSS, LOSS_GM>::EditableDataset(
-        std::vector<GM>& gms,
-        std::vector<GTVector >& gts,
-        std::vector<LossParameterType>& lossParams
-    )
-    :   Dataset<GM, LOSS, LOSS_GM>(gms.size())
-    {
-        for(size_t i=0; i<gms.size(); ++i){
-        setInstance(i, gms[i], gts[i], lossParams[i]);
-        this->buildModelWithLoss(i);
-    }
-    }
-
-
-
-
-
-    template<class GM, class LOSS, class LOSS_GM>
-    void EditableDataset<GM, LOSS, LOSS_GM>::setInstance(
-        const size_t i, 
-        const GM& gm, 
-        const GTVector& gt,
-        const LossParameterType& p
-    ) {
-        OPENGM_CHECK_OP(i, <, this->gms_.size(),"");
-        OPENGM_CHECK_OP(i, <, this->gts_.size(),"");
-        OPENGM_CHECK_OP(i, <, this->lossParams_.size(),"");
-        OPENGM_CHECK_OP(i, <, this->gmsWithLoss_.size(),"");
-        this->gms_[i] = gm;
-        this->gts_[i] = gt;
-        this->lossParams_[i] = p;
-        //std::cout<<"build model with loss\n";
-        this->buildModelWithLoss(i);
-        //std::cout<<"build model with loss DONE\n";
-    }
-
-    template<class GM, class LOSS, class LOSS_GM>
-    inline void EditableDataset<GM, LOSS, LOSS_GM>::setGT(
-        const size_t i, 
-        const GTVector& gt
-    ) {
-        OPENGM_CHECK_OP(i, <, this->gts_.size(),"");
-        this->gts_[i] = gt;
-        this->buildModelWithLoss(i);
-    }
-
-    template<class GM, class LOSS, class LOSS_GM>
-    void EditableDataset<GM, LOSS, LOSS_GM>::pushBackInstance(
-        const GM& gm, 
-        const GTVector& gt, 
-        const LossParameterType& p
-    ) {
-        this->gms_.push_back(gm);
-        this->gts_.push_back(gt);
-        this->lossParams_.push_back(p);
-        this->gmsWithLoss_.resize(this->gts_.size());
-        this->isCached_.resize(this->gts_.size());
-        this->count_.resize(this->gts_.size());
-        this->buildModelWithLoss(this->gts_.size()-1);        
-        OPENGM_CHECK_OP(this->gms_.size(), ==, this->gts_.size(),"");
-        OPENGM_CHECK_OP(this->gms_.size(), ==, this->lossParams_.size(),"");
-        OPENGM_CHECK_OP(this->gms_.size(), ==, this->gmsWithLoss_.size(),"");
-    }
-
-    template<class GM, class LOSS, class LOSS_GM>
-    inline void EditableDataset<GM, LOSS, LOSS_GM>::setWeights(
-        Weights& w
-    ) {
-        this->weights_ = w;
-    }
-
-    template<class GM, class LOSS, class LOSS_GM>
-    inline void EditableDataset<GM, LOSS, LOSS_GM>::setWeightConstraints(
-        const WeightConstraintsType & weightConstraints
-    ){
-        this->weightConstraints_ = weightConstraints;
-    }
-
-
-} // namespace datasets
-} // namespace opengm
-
-#endif 
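A hand-assembly sketch under assumed typedefs (GM construction and the weight count are elided placeholders):

    typedef opengm::datasets::EditableDataset<GM, opengm::learning::HammingLoss> DS;

    DS ds;                                   // starts empty
    GM gm;                                   // ... add variables, functions, factors ...
    std::vector<DS::LabelType> gt(gm.numberOfVariables(), 0);
    ds.pushBackInstance(gm, gt);             // also builds the loss-augmented model
    opengm::learning::Weights<double> w(numberOfWeights);  // hypothetical count
    ds.setWeights(w);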
diff --git a/include/opengm/learning/dataset/testdatasets.hxx b/include/opengm/learning/dataset/testdatasets.hxx
deleted file mode 100644
index e2f1e85..0000000
--- a/include/opengm/learning/dataset/testdatasets.hxx
+++ /dev/null
@@ -1,375 +0,0 @@
-#pragma once
-#ifndef OPENGM_TESTDATASETS_HXX
-#define OPENGM_TESTDATASETS_HXX
-
-#include <vector>
-#include <cstdlib>
-
-#include <opengm/learning/dataset/dataset.hxx>
-#include <opengm/learning/dataset/editabledataset.hxx>
-#include <opengm/functions/learnable/lpotts.hxx>
-#include <opengm/functions/learnable/lweightedsum_of_functions.hxx>
-
-namespace opengm {
-   namespace datasets{
-
-      template<class GM, class LOSS>
-      class TestDataset0 : public Dataset<GM,LOSS,GM>{ 
-      public:
-         typedef GM                     GMType;
-         typedef GM                     GMWITHLOSS;
-         typedef LOSS                   LossType;
-         typedef typename GM::ValueType ValueType;
-         typedef typename GM::IndexType IndexType;
-         typedef typename GM::LabelType LabelType;
-         typedef opengm::learning::Weights<ValueType> Weights;
-
-         TestDataset0(size_t numModels=5); 
-      };
-
-      template<class GM, class LOSS>
-      class TestDataset1 : public Dataset<GM,LOSS,GM>{ 
-      public:
-         typedef GM                     GMType;
-         typedef GM                     GMWITHLOSS;
-         typedef LOSS                   LossType;
-         typedef typename GM::ValueType ValueType;
-         typedef typename GM::IndexType IndexType;
-         typedef typename GM::LabelType LabelType;
-         typedef opengm::learning::Weights<ValueType> Weights;
-
-         TestDataset1(size_t numModels=5); 
-      };
-
-
-      template<class GM, class LOSS>
-      class TestDataset2 : public Dataset<GM,LOSS,GM>{ 
-      public:
-         typedef GM                     GMType;
-         typedef GM                     GMWITHLOSS;
-         typedef LOSS                   LossType;
-         typedef typename GM::ValueType ValueType;
-         typedef typename GM::IndexType IndexType;
-         typedef typename GM::LabelType LabelType;
-         typedef opengm::learning::Weights<ValueType> Weights;
-
-         TestDataset2(size_t numModels=4); 
-      };
-
-      template<class GM, class LOSS>
-      class TestDatasetSimple : public Dataset<GM,LOSS,GM>{ 
-      public:
-         typedef GM                     GMType;
-         typedef GM                     GMWITHLOSS;
-         typedef LOSS                   LossType;
-         typedef typename GM::ValueType ValueType;
-         typedef typename GM::IndexType IndexType;
-         typedef typename GM::LabelType LabelType;
-         typedef opengm::learning::Weights<ValueType> Weights;
-
-         TestDatasetSimple(size_t numModels=1); 
-      };
-
-      template<class GM, class LOSS>
-      class EditableTestDataset : public EditableDataset<GM,LOSS,GM>{ 
-      public:
-         typedef GM                     GMType;
-         typedef GM                     GMWITHLOSS;
-         typedef LOSS                   LossType;
-         typedef typename GM::ValueType ValueType;
-         typedef typename GM::IndexType IndexType;
-         typedef typename GM::LabelType LabelType;
-         typedef opengm::learning::Weights<ValueType> Weights;
-
-         EditableTestDataset(size_t numModels=5); 
-      };
-
-//***********************************
-//** IMPL TestDataset 0
-//***********************************
-      template<class GM, class LOSS>
-      TestDataset0<GM,LOSS>::TestDataset0(size_t numModels)
-      { 
-         this->lossParams_.resize(numModels);
-         this->isCached_.resize(numModels);
-         this->count_.resize(numModels,0);
-         this->weights_ = Weights(1);
-         LabelType numberOfLabels = 2;
-         this->gts_.resize(numModels,std::vector<LabelType>(64,0));
-         for(size_t m=0;m<numModels;++m){
-            for(size_t i=16; i<48; ++i){
-               this->gts_[m][i] = 1;
-            }
-         }
-         this->gms_.resize(numModels);
-         this->gmsWithLoss_.resize(numModels);
-         for(size_t m=0; m<numModels; ++m){
-            std::srand(m);
-            for (int j = 0; j < 64; j++)
-               this->gms_[m].addVariable(2);
-            for(size_t y = 0; y < 64; ++y){ 
-               // function
-               const size_t shape[] = {numberOfLabels};
-               ExplicitFunction<ValueType> f(shape, shape + 1);
-               ValueType val = (double)(this->gts_[m][y]) + (double)(std::rand()) / (double) (RAND_MAX) * 1.5 - 0.75 ;
-               f(0) = std::fabs(val-0);
-               f(1) = std::fabs(val-1);
-               typename GM::FunctionIdentifier fid =  this->gms_[m].addFunction(f);
-
-               // factor
-               size_t variableIndices[] = {y};
-               this->gms_[m].addFactor(fid, variableIndices, variableIndices + 1);         
-            }
-          
-            opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType> f(this->weights_,2,std::vector<size_t>(1,0),std::vector<ValueType>(1,1));
-            typename GM::FunctionIdentifier fid = this->gms_[m].addFunction(f);      
-            for(size_t y = 0; y < 64; ++y){ 
-               if(y + 1 < 64) { // (x, y) -- (x, y + 1)
-                  size_t variableIndices[] = {y, y+1};
-                  //sort(variableIndices, variableIndices + 2);
-                  this->gms_[m].addFactor(fid, variableIndices, variableIndices + 2);
-               }
-            }
-            this->buildModelWithLoss(m);
-         }      
-      };
-
-//***********************************
-//** IMPL TestDataset 1
-//***********************************
-      template<class GM, class LOSS>
-      TestDataset1<GM,LOSS>::TestDataset1(size_t numModels)
-      { 
-         this->lossParams_.resize(numModels);
-         this->isCached_.resize(numModels);
-         this->count_.resize(numModels,0);
-         this->weights_ = Weights(1);
-         LabelType numberOfLabels = 2;
-         this->gts_.resize(numModels,std::vector<LabelType>(64*64,0));
-         for(size_t m=0;m<numModels;++m){
-            for(size_t i=32*64; i<64*64; ++i){
-               this->gts_[m][i] = 1;
-            }
-         }
-         this->gms_.resize(numModels);
-         this->gmsWithLoss_.resize(numModels);
-         for(size_t m=0; m<numModels; ++m){
-            std::srand(m);
-            for (int j = 0; j < 64*64; j++)
-               this->gms_[m].addVariable(2);
-            for(size_t y = 0; y < 64; ++y){ 
-               for(size_t x = 0; x < 64; ++x) {
-                  // function
-                  const size_t shape[] = {numberOfLabels};
-                  ExplicitFunction<ValueType> f(shape, shape + 1);
-                  ValueType val = (double)(this->gts_[m][y*64+x]) + (double)(std::rand()) / (double) (RAND_MAX) * 1.5 - 0.75 ;
-                  f(0) = std::fabs(val-0);
-                  f(1) = std::fabs(val-1);
-                  typename GM::FunctionIdentifier fid =  this->gms_[m].addFunction(f);
-
-                  // factor
-                  size_t variableIndices[] = {y*64+x};
-                  this->gms_[m].addFactor(fid, variableIndices, variableIndices + 1);
-               }
-            }
-          
-            opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType> f(this->weights_,2,std::vector<size_t>(1,0),std::vector<ValueType>(1,1));
-            typename GM::FunctionIdentifier fid = this->gms_[m].addFunction(f);      
-            for(size_t y = 0; y < 64; ++y){ 
-               for(size_t x = 0; x < 64; ++x) {
-                  if(x + 1 < 64) { // (x, y) -- (x + 1, y)
-                     size_t variableIndices[] = {y*64+x, y*64+x+1};
-                     //sort(variableIndices, variableIndices + 2);
-                     this->gms_[m].addFactor(fid, variableIndices, variableIndices + 2);
-                  }
-                  if(y + 1 < 64) { // (x, y) -- (x, y + 1)
-                     size_t variableIndices[] = {y*64+x, (y+1)*64+x};
-                     //sort(variableIndices, variableIndices + 2);
-                     this->gms_[m].addFactor(fid, variableIndices, variableIndices + 2);
-                  }
-               }    
-            }
-            this->buildModelWithLoss(m);
-         }      
-      };
-
-//***********************************
-//** IMPL TestDataset 2
-//***********************************
-      template<class GM, class LOSS>
-      TestDataset2<GM,LOSS>::TestDataset2(size_t numModels)
-      { 
-         this->lossParams_.resize(numModels);
-         this->isCached_.resize(numModels);
-         this->count_.resize(numModels,0);
-         this->weights_ = Weights(3);
-         LabelType numberOfLabels = 2;
-         this->gts_.resize(numModels,std::vector<LabelType>(64*64,0));
-         for(size_t m=0;m<numModels;++m){
-            for(size_t i=32*64; i<64*64; ++i){
-               this->gts_[m][i] = 1;
-            }
-         }
-         this->gms_.resize(numModels);
-         this->gmsWithLoss_.resize(numModels);
-         for(size_t m=0; m<numModels; ++m){
-            std::srand(m);
-            for (int j = 0; j < 64*64; j++)
-               this->gms_[m].addVariable(2);
-            for(size_t y = 0; y < 64; ++y){ 
-               for(size_t x = 0; x < 64; ++x) {
-                  // function
-                  const size_t numExperts = 2;
-                  const std::vector<size_t> shape(1,numberOfLabels);
-                  std::vector<marray::Marray<ValueType> > feat(numExperts,marray::Marray<ValueType>(shape.begin(), shape.end()));
-                  ValueType val0 = (double)(this->gts_[m][y*64+x]) + (double)(std::rand()) / (double) (RAND_MAX) * 1.0 - 0.5 ;
-                  feat[0](0) = std::fabs(val0-0);
-                  feat[0](1) = std::fabs(val0-1); 
-                  ValueType val1 = (double)(this->gts_[m][y*64+x]) + (double)(std::rand()) / (double) (RAND_MAX) * 2.0 - 1.0 ;
-                  feat[1](0) = std::fabs(val1-0);
-                  feat[1](1) = std::fabs(val1-1);
-                  std::vector<size_t> wID(2);
-                  wID[0]=1;  wID[1]=2;
-                  opengm::functions::learnable::LWeightedSumOfFunctions<ValueType,IndexType,LabelType> f(shape,this->weights_, wID, feat);
-                  typename GM::FunctionIdentifier fid =  this->gms_[m].addFunction(f);
-
-                  // factor
-                  size_t variableIndices[] = {y*64+x};
-                  this->gms_[m].addFactor(fid, variableIndices, variableIndices + 1);
-               }
-            }
-          
-            opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType> f(this->weights_,2,std::vector<size_t>(1,0),std::vector<ValueType>(1,1));
-            typename GM::FunctionIdentifier fid = this->gms_[m].addFunction(f);      
-            for(size_t y = 0; y < 64; ++y){ 
-               for(size_t x = 0; x < 64; ++x) {
-                  if(x + 1 < 64) { // (x, y) -- (x + 1, y)
-                     size_t variableIndices[] = {y*64+x, y*64+x+1};
-                     //sort(variableIndices, variableIndices + 2);
-                     this->gms_[m].addFactor(fid, variableIndices, variableIndices + 2);
-                  }
-                  if(y + 1 < 64) { // (x, y) -- (x, y + 1)
-                     size_t variableIndices[] = {y*64+x, (y+1)*64+x};
-                     //sort(variableIndices, variableIndices + 2);
-                     this->gms_[m].addFactor(fid, variableIndices, variableIndices + 2);
-                  }
-               }    
-            }
-            this->buildModelWithLoss(m);
-         }
-      };
-
-//***********************************
-//** Embarrassingly simple dataset
-//***********************************
-      template<class GM, class LOSS>
-      TestDatasetSimple<GM,LOSS>::TestDatasetSimple(size_t numModels)
-      { 
-         this->lossParams_.resize(numModels);
-         this->isCached_.resize(numModels);
-         this->count_.resize(numModels,0);
-         this->weights_ = Weights(2);
-         LabelType numberOfLabels = 2;
-         this->gts_.resize(numModels,std::vector<LabelType>(1,0));
-         for(size_t m=0; m<numModels; ++m){
-            this->gts_[m][0] = 0;
-         }
-         this->gms_.resize(numModels);
-         this->gmsWithLoss_.resize(numModels);
-         for(size_t m=0; m<numModels; ++m){
-            std::srand(m);
-            this->gms_[m].addVariable(2);
-
-			// function
-            const size_t numExperts = 2;
-            const std::vector<size_t> shape(1,numberOfLabels);
-            std::vector<marray::Marray<ValueType> > feat(numExperts,marray::Marray<ValueType>(shape.begin(), shape.end()));
-            ValueType val0 = 0.5;
-            feat[0](0) = val0;
-            feat[0](1) = val0-1; 
-            ValueType val1 = -0.25;
-            feat[1](0) = val1;
-            feat[1](1) = val1-1;
-            std::vector<size_t> wID(2);
-            wID[0]=0;  wID[1]=1;
-            opengm::functions::learnable::LWeightedSumOfFunctions<ValueType,IndexType,LabelType> f(shape,this->weights_, wID, feat);
-            typename GM::FunctionIdentifier fid =  this->gms_[m].addFunction(f);
-
-			// factor
-            size_t variableIndices[] = {0};
-            this->gms_[m].addFactor(fid, variableIndices, variableIndices + 1);
-
-            this->buildModelWithLoss(m);
-         }
-      };
- 
-//***********************************
-//** IMPL TestDataset 2 (editable)
-//***********************************
-      template<class GM, class LOSS>
-      EditableTestDataset<GM,LOSS>::EditableTestDataset(size_t numModels)
-      { 
-         this->lossParams_.resize(numModels);
-         this->count_.resize(numModels,0);
-         this->weights_ = Weights(3);
-         LabelType numberOfLabels = 2;
-         this->gts_.resize(numModels,std::vector<LabelType>(64*64,0));
-         for(size_t m=0;m<numModels;++m){
-            for(size_t i=32*64; i<64*64; ++i){
-               this->gts_[m][i] = 1;
-            }
-         }
-         this->gms_.resize(numModels);
-         this->gmsWithLoss_.resize(numModels);
-         for(size_t m=0; m<numModels; ++m){
-            std::srand(m);
-            for (int j = 0; j < 64*64; j++)
-               this->gms_[m].addVariable(2);
-            for(size_t y = 0; y < 64; ++y){ 
-               for(size_t x = 0; x < 64; ++x) {
-                  // function
-                  const size_t numExperts = 2;
-                  const std::vector<size_t> shape(1,numberOfLabels);
-                  std::vector<marray::Marray<ValueType> > feat(numExperts,marray::Marray<ValueType>(shape.begin(), shape.end()));
-                  ValueType val0 = (double)(this->gts_[m][y*64+x]) + (double)(std::rand()) / (double) (RAND_MAX) * 1.0 - 0.5 ;
-                  feat[0](0) = std::fabs(val0-0);
-                  feat[0](1) = std::fabs(val0-1); 
-                  ValueType val1 = (double)(this->gts_[m][y*64+x]) + (double)(std::rand()) / (double) (RAND_MAX) * 2.0 - 1.0 ;
-                  feat[1](0) = std::fabs(val1-0);
-                  feat[1](1) = std::fabs(val1-1);
-                  std::vector<size_t> wID(2);
-                  wID[0]=1;  wID[1]=2;
-                  opengm::functions::learnable::LWeightedSumOfFunctions<ValueType,IndexType,LabelType> f(shape,this->weights_, wID, feat);
-                  typename GM::FunctionIdentifier fid =  this->gms_[m].addFunction(f);
-
-                  // factor
-                  size_t variableIndices[] = {y*64+x};
-                  this->gms_[m].addFactor(fid, variableIndices, variableIndices + 1);
-               }
-            }
-          
-            opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType> f(this->weights_,2,std::vector<size_t>(1,0),std::vector<ValueType>(1,1));
-            typename GM::FunctionIdentifier fid = this->gms_[m].addFunction(f);      
-            for(size_t y = 0; y < 64; ++y){ 
-               for(size_t x = 0; x < 64; ++x) {
-                  if(x + 1 < 64) { // (x, y) -- (x + 1, y)
-                     size_t variableIndices[] = {y*64+x, y*64+x+1};
-                     //sort(variableIndices, variableIndices + 2);
-                     this->gms_[m].addFactor(fid, variableIndices, variableIndices + 2);
-                  }
-                  if(y + 1 < 64) { // (x, y) -- (x, y + 1)
-                     size_t variableIndices[] = {y*64+x, (y+1)*64+x};
-                     //sort(variableIndices, variableIndices + 2);
-                     this->gms_[m].addFactor(fid, variableIndices, variableIndices + 2);
-                  }
-               }    
-            }
-            this->buildModelWithLoss(m);
-         }
-      };
-
-
-   }
-} // namespace opengm
-
-#endif 
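Instantiating one of these fixtures requires a model type whose function list includes both ExplicitFunction and the learnable LPotts used above; a sketch with assumed typedefs:

    typedef opengm::meta::TypeListGenerator<
        opengm::ExplicitFunction<double, size_t, size_t>,
        opengm::functions::learnable::LPotts<double, size_t, size_t>
    >::type FunctionList;
    typedef opengm::GraphicalModel<double, opengm::Adder, FunctionList> GM;

    // five noisy 64x64 half/half labelings sharing one Potts weight
    opengm::datasets::TestDataset1<GM, opengm::learning::HammingLoss> ds(5);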
diff --git a/include/opengm/learning/gradient-accumulator.hxx b/include/opengm/learning/gradient-accumulator.hxx
deleted file mode 100644
index 88e920a..0000000
--- a/include/opengm/learning/gradient-accumulator.hxx
+++ /dev/null
@@ -1,175 +0,0 @@
-#ifndef OPENGM_LEARNING_GRADIENT_ACCUMULATOR_H__
-#define OPENGM_LEARNING_GRADIENT_ACCUMULATOR_H__
-
-namespace opengm {
-namespace learning {
-
-/**
- * Model function visitor to accumulate the gradient for each model weight, 
- * given a configuration.
- */
-template <typename ModelWeights, typename ConfigurationType>
-class GradientAccumulator {
-    typedef typename ConfigurationType::const_iterator ConfIter;
-
-public:
-
-    /**
-     * How to accumulate the gradient on the provided ModelWeights.
-     */
-    enum Mode {
-
-        Add,
-
-        Subtract
-    };
-
-    /**
-     * @param gradient
-     *              ModelWeights reference to store the gradients. Gradient 
-     *              values will only be added (or subtracted, if mode == 
-     *              Subtract), so you have to make sure gradient is properly 
-     *              initialized to zero.
-     *
-     * @param configuration
-     *              Configuration of the variables in the model, to evaluate the 
-     *              gradient for.
-     *
-     * @param mode
-     *              Add or Subtract the weight gradients from gradient.
-     */
-    GradientAccumulator(ModelWeights& gradient, const ConfigurationType& configuration, Mode mode = Add) :
-        _gradient(gradient),
-        _configuration(configuration),
-        _mode(mode) {}
-
-    template <typename Iterator, typename FunctionType>
-    void operator()(Iterator begin, Iterator end, const FunctionType& function) {
-
-        typedef opengm::SubsetAccessor<Iterator, ConfIter> Accessor;
-        typedef opengm::AccessorIterator<Accessor, true> Iter;
-        const Accessor accessor(begin, end, _configuration.begin());
-
-        for (int i = 0; i < function.numberOfWeights(); i++) {
-
-            int index = function.weightIndex(i);
-            double g = function.weightGradient(i, Iter(accessor, 0));
-            if (_mode == Add)
-                _gradient[index] += g;
-            else
-                _gradient[index] -= g;
-        }
-    }
-
-private:
-
-    ModelWeights& _gradient;
-    const ConfigurationType& _configuration;
-    Mode _mode;
-};
-
-
-template<class GM, class LABEL_ITER>
-struct FeatureAccumulator{
-
-    typedef typename GM::LabelType LabelType;
-    typedef typename GM::IndexType IndexType;
-    typedef typename GM::ValueType ValueType;
-    
-
-
-    FeatureAccumulator(const size_t nW, bool add = true)
-    :   accWeights_(nW),
-        gtLabel_(),
-        mapLabel_(),
-        add_(add),
-        weight_(1.0)
-        {
-
-        for(size_t i=0; i<accWeights_.size(); ++i){
-            accWeights_[i] = 0.0;
-        }
-    }
-
-    void setLabels(const LABEL_ITER gtLabel, const LABEL_ITER mapLabel){
-        gtLabel_ = gtLabel;
-        mapLabel_  = mapLabel;
-    }
-
-    void resetWeights(){
-        for(size_t i=0; i<accWeights_.size(); ++i){
-            accWeights_[i] = 0.0;
-        }
-    }
-    const Weights<double> &  getWeights()const{
-        return accWeights_;
-    }
-    double getWeight(const size_t wi)const{
-        return accWeights_[wi];
-    }
-    template<class Iter, class F>
-    void operator()(Iter begin, Iter end, const F & f){
-
-        typedef opengm::SubsetAccessor<Iter, LABEL_ITER> Accessor;
-        typedef opengm::AccessorIterator<Accessor, true> AccessorIter;
-
-        // get the number of weights_
-        const size_t nWeights = f.numberOfWeights();
-        if(nWeights>0){
-            // loop over all weights
-            for(size_t wi=0; wi<nWeights; ++wi){
-                // accumulate features for both labeling
-                const size_t gwi = f.weightIndex(wi);
-
-
-                const Accessor accessorGt(begin, end, gtLabel_);
-                const Accessor accessorMap(begin, end, mapLabel_);
-
-                if(add_){
-                    // for gt label
-                    accWeights_[gwi] += weight_*f.weightGradient(wi, AccessorIter(accessorGt, 0));
-                    // for test label
-                    accWeights_[gwi] -= weight_*f.weightGradient(wi, AccessorIter(accessorMap, 0));
-                }
-                else{
-                    // for gt label
-                    accWeights_[gwi] -= weight_*f.weightGradient(wi, AccessorIter(accessorGt, 0));
-                    // for test label
-                    accWeights_[gwi] += weight_*f.weightGradient(wi, AccessorIter(accessorMap, 0));
-                }
-            }
-        }
-    }
-
-    void accumulateFromOther(const FeatureAccumulator & otherAcc){
-        for(size_t i=0; i<accWeights_.size(); ++i){
-            accWeights_[i] += otherAcc.accWeights_[i];
-        }
-    }
-
-    void accumulateModelFeatures(
-        const GM & gm, 
-        const LABEL_ITER & gtLabel,
-        const LABEL_ITER & mapLabel,
-        const double weight  = 1.0
-    ){
-        gtLabel_ = gtLabel;
-        mapLabel_  = mapLabel;
-        weight_ = weight;
-        // iterate over all factors
-        // and accumulate features
-        for(size_t fi=0; fi<gm.numberOfFactors(); ++fi){
-            gm[fi].callViFunctor(*this);
-        }
-    }
-    opengm::learning::Weights<double>  accWeights_;
-    LABEL_ITER gtLabel_;
-    LABEL_ITER mapLabel_;
-    bool add_;
-    double weight_;
-};
-
-}} // namespace opengm::learning
-
-#endif // OPENGM_LEARNING_GRADIENT_ACCUMULATOR_H__
-
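The accumulator above is the core of a structured-perceptron-style update: with add == true it gathers, per weight j, the gradient difference between the ground-truth and the MAP labeling. A sketch of the resulting update, where the learning rate eta, the labelings gt/mapLabels and numberOfWeights are assumptions:

    typedef std::vector<GM::LabelType>::const_iterator LabelIter;

    opengm::learning::FeatureAccumulator<GM, LabelIter> acc(numberOfWeights, true);
    acc.accumulateModelFeatures(gm, gt.begin(), mapLabels.begin());

    // w_j <- w_j + eta * ( dE/dw_j(gt) - dE/dw_j(map) )
    for (size_t j = 0; j < numberOfWeights; ++j)
        weights.setWeight(j, weights[j] + eta * acc.getWeight(j));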
diff --git a/include/opengm/learning/gridsearch-learning.hxx b/include/opengm/learning/gridsearch-learning.hxx
deleted file mode 100644
index 750c844..0000000
--- a/include/opengm/learning/gridsearch-learning.hxx
+++ /dev/null
@@ -1,126 +0,0 @@
-#pragma once
-#ifndef OPENGM_GRIDSEARCH_LEARNER_HXX
-#define OPENGM_GRIDSEARCH_LEARNER_HXX
-
-#include <vector>
-
-namespace opengm {
-   namespace learning {
-
-      
-      template<class DATASET>
-      class GridSearchLearner
-      {
-      public: 
-         typedef DATASET DatasetType;
-         typedef typename DATASET::GMType   GMType; 
-         typedef typename DATASET::LossType LossType;
-         typedef typename GMType::ValueType ValueType;
-         typedef typename GMType::IndexType IndexType;
-         typedef typename GMType::LabelType LabelType; 
-
-         class Parameter{
-         public:
-            std::vector<double> parameterUpperbound_; 
-            std::vector<double> parameterLowerbound_;
-            std::vector<size_t> testingPoints_;
-            Parameter(){}
-         };
-
-
-         GridSearchLearner(DATASET&, const Parameter& );
-
-         template<class INF>
-         void learn(const typename INF::Parameter& para); 
-         //template<class INF, class VISITOR>
-         //void learn(typename INF::Parameter para, VISITOR vis);
-
-         const opengm::learning::Weights<double>& getWeights(){return weights_;}
-         Parameter& getLearningParameters(){return para_;}
-
-      private:
-         DATASET& dataset_;
-         opengm::learning::Weights<double> weights_;
-         Parameter para_;
-      }; 
-
-      template<class DATASET>
-      GridSearchLearner<DATASET>::GridSearchLearner(DATASET& ds, const Parameter& p )
-         : dataset_(ds), para_(p)
-      {
-         weights_ = opengm::learning::Weights<double>(ds.getNumberOfWeights());
-         if(para_.parameterUpperbound_.size() != ds.getNumberOfWeights())
-            para_.parameterUpperbound_.resize(ds.getNumberOfWeights(),10.0);
-         if(para_.parameterLowerbound_.size() != ds.getNumberOfWeights())
-            para_.parameterLowerbound_.resize(ds.getNumberOfWeights(),0.0);
-         if(para_.testingPoints_.size() != ds.getNumberOfWeights())
-            para_.testingPoints_.resize(ds.getNumberOfWeights(),10);
-      }
-
-
-      template<class DATASET>
-      template<class INF>
-      void GridSearchLearner<DATASET>::learn(const typename INF::Parameter& para){
-         // generate model Parameters
-         opengm::learning::Weights<double> modelPara( dataset_.getNumberOfWeights() );
-         opengm::learning::Weights<double> bestModelPara( dataset_.getNumberOfWeights() );
-         double bestLoss = std::numeric_limits<double>::infinity();
-         std::vector<size_t> itC(dataset_.getNumberOfWeights(),0);
-         
-         bool search=true;
-         while(search){
-            // Get Parameter
-            for(size_t p=0; p<dataset_.getNumberOfWeights(); ++p){
-               modelPara.setWeight(p, para_.parameterLowerbound_[p] + double(itC[p])/double(para_.testingPoints_[p]-1)*(para_.parameterUpperbound_[p]-para_.parameterLowerbound_[p]) );
-            }
-            // Evaluate Loss
-            opengm::learning::Weights<double>& mp =  dataset_.getWeights();
-            mp = modelPara;
-            const double loss = dataset_. template getTotalLoss<INF>(para);
-           
-
-            // **************
-
-            if(loss<bestLoss){
-                 // *call visitor*
-                for(size_t p=0; p<dataset_.getNumberOfWeights(); ++p){
-                   std::cout << modelPara[p] <<" ";
-                }
-                std::cout << " ==> ";
-                std::cout << loss << std::endl;
-
-                bestLoss=loss;
-                bestModelPara=modelPara;
-                if(loss<=0.000000001){
-                    search = false;
-                }
-            }
-            //Increment Parameter
-            for(size_t p=0; p<dataset_.getNumberOfWeights(); ++p){
-               if(itC[p]<para_.testingPoints_[p]-1){
-                  ++itC[p];
-                  break;
-               }
-               else{
-                  itC[p]=0;
-                  if (p==dataset_.getNumberOfWeights()-1)
-                     search = false; 
-               }             
-            }
-         }
-         std::cout << "Best"<<std::endl;
-         for(size_t p=0; p<dataset_.getNumberOfWeights(); ++p){
-            std::cout << bestModelPara[p] <<" ";
-         }
-         std::cout << " ==> ";
-         std::cout << bestLoss << std::endl;
-         weights_ = bestModelPara;
-
-         // save best weights in dataset
-         for(size_t p=0; p<dataset_.getNumberOfWeights(); ++p){
-            dataset_.getWeights().setWeight(p, weights_[p]);
-         }
-      }
-   }
-}
-#endif
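
Note: the learner above sweeps an axis-aligned grid of weight vectors with an odometer-style counter: the first coordinate that can still be incremented is incremented, and all coordinates before it reset to zero. A self-contained sketch of that enumeration, assuming equal bounds and at least two test points per axis (enumerateGrid is an illustrative name only):

    #include <cstddef>
    #include <vector>

    // visit every point of an nDim-dimensional grid, nPoints per axis (nPoints >= 2)
    template<class VISITOR>
    void enumerateGrid(std::size_t nDim, std::size_t nPoints,
                       double lo, double hi, VISITOR visit)
    {
        std::vector<std::size_t> itC(nDim, 0);    // odometer state
        std::vector<double> point(nDim);
        for(;;){
            for(std::size_t d = 0; d < nDim; ++d)
                point[d] = lo + double(itC[d]) / double(nPoints - 1) * (hi - lo);
            visit(point);
            std::size_t d = 0;                     // increment the odometer
            for(; d < nDim; ++d){
                if(itC[d] + 1 < nPoints){ ++itC[d]; break; }
                itC[d] = 0;                        // carry into the next digit
            }
            if(d == nDim) break;                   // wrapped around: all points visited
        }
    }

The removed class does the same with per-dimension bounds and point counts, evaluating the dataset loss at each grid point and keeping the best weights.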
diff --git a/include/opengm/learning/loss/flexibleloss.hxx b/include/opengm/learning/loss/flexibleloss.hxx
deleted file mode 100644
index bad9466..0000000
--- a/include/opengm/learning/loss/flexibleloss.hxx
+++ /dev/null
@@ -1,305 +0,0 @@
-#pragma once
-#ifndef OPENGM_FLEXIBLE_LOSS_HXX
-#define OPENGM_FLEXIBLE_LOSS_HXX
-
-#include "opengm/functions/explicit_function.hxx"
-#include "opengm/graphicalmodel/graphicalmodel_hdf5.hxx"
-#include "hdf5.h"
-
-namespace opengm {
-namespace learning {
-
-/**
- * The flexible loss bundles several losses behind a single interface.
- * Parameter::lossType_ selects between a (generalized) Hamming loss,
- * L1/L2 label-distance losses, a partition (multicut) loss on pairwise
- * factors, and a confusion-matrix loss; per-node, per-label and
- * per-factor multipliers default to 1 when not specified.
- **/
-class FlexibleLoss{
-public:
-    class Parameter{
-    public:
-
-        enum LossType{
-            Hamming = 0 ,
-            L1 = 1,
-            L2 = 2,
-            Partition = 3,
-            ConfMat = 4
-        };
-
-        Parameter(){
-            lossType_ = Hamming;
-        }
-
-
-        bool operator==(const Parameter & other) const{
-            throw opengm::RuntimeError("do not call me");
-        }
-        bool operator<(const Parameter & other) const{
-            throw opengm::RuntimeError("do not call me");
-        }
-        bool operator>(const Parameter & other) const{
-            throw opengm::RuntimeError("do not call me");
-        }
-        double getNodeLossMultiplier(const size_t i) const;
-        double getLabelLossMultiplier(const size_t i) const;
-        double getFactorLossMultiplier(const size_t i) const;
-        double getLabelConfMatMultiplier(const size_t l, const size_t lgt)const;
-        /**
-         * serializes the parameter object to the given hdf5 group handle;
-         * save() writes the datasets "lossId" and "lossType" (as size_t)
-         * plus any non-empty multiplier vectors
-         **/
-        void save(hid_t& groupHandle) const;
-        void load(const hid_t& groupHandle);
-        static std::size_t getLossId() { return lossId_; }
-
-        LossType lossType_;
-        std::vector<double>     nodeLossMultiplier_;
-        std::vector<double>     labelLossMultiplier_;
-        std::vector<double>     factorMultiplier_;
-        marray::Marray<double>  confMat_;
-        
-
-
-    private:
-        static const std::size_t lossId_ = 16006;
-
-    };
-
-
-public:
-    FlexibleLoss(const Parameter& param = Parameter()) : param_(param){}
-
-    template<class GM, class IT1, class IT2>
-            double loss(const GM & gm, IT1 labelBegin, IT1 labelEnd, IT2 GTBegin,IT2 GTEnd) const;
-
-    template<class GM, class IT>
-    void addLoss(GM& gm, IT GTBegin) const;
-
-private:
-    Parameter param_;
-};
-
-inline double FlexibleLoss::Parameter::getNodeLossMultiplier(const size_t i) const {
-    if(i >= this->nodeLossMultiplier_.size()) {
-        return 1.;
-    }
-    return this->nodeLossMultiplier_[i];
-}
-
-inline double FlexibleLoss::Parameter::getFactorLossMultiplier(const size_t i) const {
-    if(i >= this->factorMultiplier_.size()) {
-        return 1.;
-    }
-    return this->factorMultiplier_[i];
-}
-
-inline double FlexibleLoss::Parameter::getLabelLossMultiplier(const size_t i) const {
-    if(i >= this->labelLossMultiplier_.size()) {
-        return 1.;
-    }
-    return this->labelLossMultiplier_[i];
-}
-
-inline double FlexibleLoss::Parameter::getLabelConfMatMultiplier(const size_t l, const size_t lgt)const{
-    if(l<confMat_.shape(0) && lgt<confMat_.shape(1)){
-        return confMat_(l, lgt);
-    }
-    return 1.0;
-}
-
-inline void FlexibleLoss::Parameter::save(hid_t& groupHandle) const {
-    std::vector<std::size_t> name;
-    name.push_back(this->getLossId());
-    marray::hdf5::save(groupHandle,"lossId",name);
-
-
-    std::vector<size_t> lossType(1, size_t(lossType_));
-    marray::hdf5::save(groupHandle,"lossType",lossType);
-
-    if (this->factorMultiplier_.size() > 0) {
-        marray::hdf5::save(groupHandle,"factorLossMultiplier",this->factorMultiplier_);
-    }
-    if (this->nodeLossMultiplier_.size() > 0) {
-        marray::hdf5::save(groupHandle,"nodeLossMultiplier",this->nodeLossMultiplier_);
-    }
-    if (this->labelLossMultiplier_.size() > 0) {
-        marray::hdf5::save(groupHandle,"labelLossMultiplier",this->labelLossMultiplier_);
-    }
-}
-
-inline void FlexibleLoss::Parameter::load(const hid_t& groupHandle) {
-
-    std::cout<<"load loss type \n";
-    std::vector<size_t> lossType;
-    marray::hdf5::loadVec(groupHandle, "lossType", lossType);
-    if(lossType[0] == size_t(Hamming)){
-        lossType_ = Hamming;
-    }
-    else if(lossType[0] == size_t(L1)){
-        lossType_ = L1;
-    }
-    else if(lossType[0] == size_t(L2)){
-        lossType_ = L2;
-    }
-    else if(lossType[0] == size_t(Partition)){
-        lossType_ = Partition;
-    }
-    else if(lossType[0] == size_t(ConfMat)){
-        lossType_ = ConfMat;
-    }
-
-
-    if (H5Lexists(groupHandle, "nodeLossMultiplier", H5P_DEFAULT)) {
-        marray::hdf5::loadVec(groupHandle, "nodeLossMultiplier", this->nodeLossMultiplier_);
-    }
-    if (H5Lexists(groupHandle, "factorLossMultiplier", H5P_DEFAULT)) {
-        marray::hdf5::loadVec(groupHandle, "factorLossMultiplier", this->factorMultiplier_);
-    }
-    if (H5Lexists(groupHandle, "labelLossMultiplier", H5P_DEFAULT)) {
-        marray::hdf5::loadVec(groupHandle, "labelLossMultiplier", this->labelLossMultiplier_);
-    }
-}
-
-template<class GM, class IT1, class IT2>
-double FlexibleLoss::loss(const GM & gm, IT1 labelBegin, const IT1 labelEnd, IT2 GTBegin, const IT2 GTEnd) const
-{
-    typedef typename  GM::LabelType LabelType;
-    typedef typename  GM::IndexType IndexType;
-    typedef typename  GM::ValueType ValueType;
-
-    double loss = 0.0;
-    size_t nodeIndex = 0;
-    if(param_.lossType_ == Parameter::Hamming){
-        for(; labelBegin!= labelEnd; ++labelBegin, ++GTBegin, ++nodeIndex){
-            if(*labelBegin != *GTBegin){            
-                loss += param_.getNodeLossMultiplier(nodeIndex) * param_.getLabelLossMultiplier(*labelBegin);
-            }
-        }
-    }
-    else if(param_.lossType_ == Parameter::L1 || param_.lossType_ == Parameter::L2){
-        const int norm = param_.lossType_ == Parameter::L1 ? 1 : 2;
-        for(; labelBegin!= labelEnd; ++labelBegin, ++GTBegin, ++nodeIndex){
-            if(*labelBegin != *GTBegin){
-                // cast before subtracting: LabelType is typically unsigned
-                loss += param_.getNodeLossMultiplier(nodeIndex) * std::pow(std::fabs(double(*GTBegin) - double(*labelBegin)), norm);
-            }
-        }
-    }
-    else if(param_.lossType_ == Parameter::ConfMat){
-        throw opengm::RuntimeError("ConfMat Loss is not yet implemented");
-    }
-    else if(param_.lossType_ == Parameter::Partition){
-
-        const size_t nFac = gm.numberOfFactors();
-
-        for(size_t fi=0; fi<nFac; ++fi){
-            const size_t nVar = gm[fi].numberOfVariables();
-            OPENGM_CHECK_OP(nVar,==,2,"Partition / Multicut Loss is only allowed if the graphical model has only"
-                                      " second order factors (this might be changed in the future)");
-            const IndexType vis[2] = { gm[fi].variableIndex(0), gm[fi].variableIndex(1)};
-            const double facVal = param_.getFactorLossMultiplier(fi);
-            // in the gt they are in the same cluster
-            if( (GTBegin[vis[0]] == GTBegin[vis[1]]) !=
-                (labelBegin[vis[0]] == labelBegin[vis[1]])  ){
-                loss +=facVal;
-            }
-        }
-    }
-    else{
-        throw opengm::RuntimeError("INTERNAL ERROR: unknown Loss Type");
-    }
-    return loss;
-}
-
-template<class GM, class IT>
-void FlexibleLoss::addLoss(GM& gm, IT gt) const
-{
-    typedef typename  GM::LabelType LabelType;
-    typedef typename  GM::IndexType IndexType;
-    typedef typename  GM::ValueType ValueType;
-    typedef opengm::ExplicitFunction<ValueType, IndexType,  LabelType>  ExplicitFunction;
-    typedef opengm::PottsFunction<ValueType, IndexType,  LabelType>  Potts;
-
-    if(param_.lossType_ == Parameter::Hamming){
-        for(IndexType i=0; i<gm.numberOfVariables(); ++i){
-            LabelType numL = gm.numberOfLabels(i);
-            ExplicitFunction f(&numL, &numL+1, 0);
-            for(LabelType l = 0; l < numL; ++l){
-                f(l) = - param_.getNodeLossMultiplier(i) * param_.getLabelLossMultiplier(l);
-            }
-            f(*gt) = 0;
-            ++gt;
-            gm.addFactor(gm.addFunction(f), &i, &i+1);     
-        }
-    }
-    else if(param_.lossType_ == Parameter::L1 || param_.lossType_ == Parameter::L2){
-        const int norm = param_.lossType_ == Parameter::L1 ? 1 : 2;
-        for(IndexType i=0; i<gm.numberOfVariables(); ++i){
-            LabelType numL = gm.numberOfLabels(i);
-            ExplicitFunction f(&numL, &numL+1, 0);
-            const LabelType gtL = *gt;
-            for(LabelType l = 0; l < numL; ++l){
-                // cast before subtracting: LabelType is typically unsigned
-                f(l) = - param_.getNodeLossMultiplier(i) * std::pow(std::fabs(double(gtL) - double(l)), norm);
-            }
-            f(gtL) = 0;
-            ++gt;
-            gm.addFactor(gm.addFunction(f), &i, &i+1);
-        }
-    }
-    else if(param_.lossType_ == Parameter::ConfMat){
-        throw opengm::RuntimeError("ConfMat Loss is not yet implemented");
-    }
-    else if(param_.lossType_ == Parameter::Partition){
-
-        const size_t nFactorsInit = gm.numberOfFactors();
-
-        for(size_t fi=0; fi<nFactorsInit; ++fi){
-            const size_t nVar = gm[fi].numberOfVariables();
-            OPENGM_CHECK_OP(nVar,==,2,"Partition / Multicut Loss is only allowed if the graphical model has only"
-                                      " second order factors (this might be changed in the future)");
-
-            const IndexType vis[2] = { gm[fi].variableIndex(0), gm[fi].variableIndex(1)};
-            const LabelType nl[2]  = { gm.numberOfLabels(vis[0]), gm.numberOfLabels(vis[1])};
-
-            const double facVal = param_.getFactorLossMultiplier(fi);
-
-            // in the gt they are in the same cluster
-            if(gt[vis[0]] == gt[vis[1]]){
-                Potts pf(nl[0],nl[1], 0.0, -1.0*facVal);
-                gm.addFactor(gm.addFunction(pf), vis,vis+2);
-            }
-            // in the gt they are in different clusters
-            else{
-                Potts pf(nl[0],nl[1], -1.0*facVal, 0.0);
-                gm.addFactor(gm.addFunction(pf), vis,vis+2);
-            }
-        }
-    }
-    else{
-        throw opengm::RuntimeError("INTERNAL ERROR: unknown Loss Type");
-    }
-}
-
-} // namespace learning
-} // namespace opengm
-
-#endif 
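
Note: the three node-wise loss types above reduce to simple sums over label mismatches. A self-contained sketch with unit multipliers assumed (nodeLoss is an illustrative name only):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Hamming (norm 0): count mismatches; L1/L2: sum |gt - l|^norm over mismatches
    double nodeLoss(const std::vector<std::size_t>& labels,
                    const std::vector<std::size_t>& gt, int norm /* 0, 1 or 2 */)
    {
        double loss = 0.0;
        for(std::size_t i = 0; i < labels.size(); ++i){
            if(labels[i] == gt[i]) continue;
            loss += (norm == 0) ? 1.0
                  : std::pow(std::fabs(double(gt[i]) - double(labels[i])), norm);
        }
        return loss;
    }

For example, labels (0,2,1) against ground truth (0,0,1) give a Hamming loss of 1, an L1 loss of 2 and an L2 loss of 4; the partition loss instead charges each pairwise factor whose cut/join decision disagrees with the ground-truth clustering.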
diff --git a/include/opengm/learning/loss/generalized-hammingloss.hxx b/include/opengm/learning/loss/generalized-hammingloss.hxx
deleted file mode 100644
index e19d945..0000000
--- a/include/opengm/learning/loss/generalized-hammingloss.hxx
+++ /dev/null
@@ -1,152 +0,0 @@
-#pragma once
-#ifndef OPENGM_GENERALIZED_HAMMING_LOSS_HXX
-#define OPENGM_GENERALIZED_HAMMING_LOSS_HXX
-
-#include "opengm/functions/explicit_function.hxx"
-#include "opengm/graphicalmodel/graphicalmodel_hdf5.hxx"
-#include "hdf5.h"
-
-namespace opengm {
-namespace learning {
-
-/**
- * The generalized Hamming loss incurs a penalty of nodeLossMultiplier[n] * labelLossMultiplier[l]
- * for node n taking label l; if l equals the ground-truth label, the penalty is zero.
- * One can imagine the overall cost matrix as the outer product nodeLossMultiplier * labelLossMultiplier,
- * with zeros where the node label equals the ground truth.
- **/
-class GeneralizedHammingLoss{
-public:
-    class Parameter{
-    public:
-        double getNodeLossMultiplier(const size_t i) const;
-        double getLabelLossMultiplier(const size_t i) const;
-
-
-        bool operator==(const Parameter & other) const{
-                return nodeLossMultiplier_ == other.nodeLossMultiplier_
-                    && labelLossMultiplier_ == other.labelLossMultiplier_;
-        }
-        bool operator<(const Parameter & other) const{
-                return nodeLossMultiplier_ < other.nodeLossMultiplier_;
-        }
-        bool operator>(const Parameter & other) const{
-                return nodeLossMultiplier_ > other.nodeLossMultiplier_;
-        }
-
-        /**
-         * serializes the parameter object to the given hdf5 group handle;
-         * save() writes a dataset "lossId" plus any non-empty
-         * multiplier vectors
-         **/
-        void save(hid_t& groupHandle) const;
-        void load(const hid_t& groupHandle);
-        static std::size_t getLossId() { return lossId_; }
-
-
-        std::vector<double> nodeLossMultiplier_;
-        std::vector<double> labelLossMultiplier_;
-
-
-    private:
-        static const std::size_t lossId_ = 16001;
-
-    };
-
-
-public:
-    GeneralizedHammingLoss(const Parameter& param = Parameter()) : param_(param){}
-
-    template<class GM, class IT1, class IT2>
-            double loss(const GM & gm, IT1 labelBegin, IT1 labelEnd, IT2 GTBegin,IT2 GTEnd) const;
-
-    template<class GM, class IT>
-    void addLoss(GM& gm, IT GTBegin) const;
-
-private:
-    Parameter param_;
-};
-
-inline double GeneralizedHammingLoss::Parameter::getNodeLossMultiplier(const size_t i) const {
-    if(i >= this->nodeLossMultiplier_.size()) {
-        return 1.;
-    }
-    return this->nodeLossMultiplier_[i];
-}
-
-inline double GeneralizedHammingLoss::Parameter::getLabelLossMultiplier(const size_t i) const {
-    if(i >= this->labelLossMultiplier_.size()) {
-        return 1.;
-    }
-    return this->labelLossMultiplier_[i];
-}
-
-inline void GeneralizedHammingLoss::Parameter::save(hid_t& groupHandle) const {
-    std::vector<std::size_t> name;
-    name.push_back(this->getLossId());
-    marray::hdf5::save(groupHandle,"lossId",name);
-
-    if (this->nodeLossMultiplier_.size() > 0) {
-        marray::hdf5::save(groupHandle,"nodeLossMultiplier",this->nodeLossMultiplier_);
-    }
-    if (this->labelLossMultiplier_.size() > 0) {
-        marray::hdf5::save(groupHandle,"labelLossMultiplier",this->labelLossMultiplier_);
-    }
-}
-
-inline void GeneralizedHammingLoss::Parameter::load(const hid_t& groupHandle) {
-    // use H5Lexists: H5Dopen would leak the dataset handle on success
-    if (H5Lexists(groupHandle, "nodeLossMultiplier", H5P_DEFAULT)) {
-        marray::hdf5::loadVec(groupHandle, "nodeLossMultiplier", this->nodeLossMultiplier_);
-    } else {
-        std::cout << "nodeLossMultiplier of GeneralizedHammingLoss not found, setting default values" << std::endl;
-    }
-
-    if (H5Lexists(groupHandle, "labelLossMultiplier", H5P_DEFAULT)) {
-        marray::hdf5::loadVec(groupHandle, "labelLossMultiplier", this->labelLossMultiplier_);
-    } else {
-        std::cout << "labelLossMultiplier of GeneralizedHammingLoss not found, setting default values" << std::endl;
-    }
-}
-
-template<class GM, class IT1, class IT2>
-double GeneralizedHammingLoss::loss(const GM & gm, IT1 labelBegin, const IT1 labelEnd, IT2 GTBegin, const IT2 GTEnd) const
-{
-    double loss = 0.0;
-    size_t nodeIndex = 0;
-
-    for(; labelBegin!= labelEnd; ++labelBegin, ++GTBegin, ++nodeIndex){
-        if(*labelBegin != *GTBegin){            
-            loss += param_.getNodeLossMultiplier(nodeIndex) * param_.getLabelLossMultiplier(*labelBegin);
-        }
-    }
-    return loss;
-}
-
-template<class GM, class IT>
-void GeneralizedHammingLoss::addLoss(GM& gm, IT gt) const
-{
-    //std::cout<<"start to add loss\n";
-    for(typename GM::IndexType i=0; i<gm.numberOfVariables(); ++i){
-        //std::cout<<"   vi"<<i<<"\n";
-        typename GM::LabelType numL = gm.numberOfLabels(i);
-        //std::cout<<"   vi numL"<<numL<<"\n";
-        opengm::ExplicitFunction<typename GM::ValueType,typename GM::IndexType, typename GM::LabelType> f(&numL, &numL+1, 0);
-
-        //std::cout<<"   apply multiplier\n";
-        for(typename GM::LabelType l = 0; l < numL; ++l){
-            f(l) = - param_.getNodeLossMultiplier(i) * param_.getLabelLossMultiplier(l);
-        }
-
-        f(*gt) = 0;
-        //std::cout<<"   increment\n";
-        ++gt;
-        //std::cout<<"   add\n";
-        gm.addFactor(gm.addFunction(f), &i, &i+1);
-        //std::cout<<"   next\n";
-    }
-    //std::cout<<"end add loss\n";
-}
-
-} // namespace learning
-} // namespace opengm
-
-#endif 
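
Note: the cost matrix above is the outer product of the two multiplier vectors. For example, with nodeLossMultiplier = (1, 2) and labelLossMultiplier = (0.5, 1), node 1 predicted as label 0 against a ground truth of label 1 contributes 2 * 0.5 = 1 to the loss, while any correctly labeled node contributes 0.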
diff --git a/include/opengm/learning/loss/hammingloss.hxx b/include/opengm/learning/loss/hammingloss.hxx
deleted file mode 100644
index 47e272e..0000000
--- a/include/opengm/learning/loss/hammingloss.hxx
+++ /dev/null
@@ -1,81 +0,0 @@
-#pragma once
-#ifndef OPENGM_HAMMING_LOSS_HXX
-#define OPENGM_HAMMING_LOSS_HXX
-
-#include "opengm/functions/explicit_function.hxx"
-#include "opengm/graphicalmodel/graphicalmodel_hdf5.hxx"
-
-namespace opengm {
-   namespace learning {
-      class HammingLoss{
-      public:
-          class Parameter{
-            public:
-            bool operator==(const HammingLoss & other) const{
-                return true;
-            }
-            bool operator<(const HammingLoss & other) const{
-                return false;
-            }
-            bool operator>(const HammingLoss & other) const{
-                return false;
-            }
-            /**
-             * serializes the parameter object to the given hdf5 group handle;
-             * save() writes a dataset "lossId" identifying the loss
-             **/
-            void save(hid_t& groupHandle) const;
-            inline void load(const hid_t& ) {}
-            static std::size_t getLossId() { return lossId_; }
-          private:
-            static const std::size_t lossId_ = 16000;
-          };
-
-      public:
-         HammingLoss(const Parameter& param = Parameter()) : param_(param){}
-
-         template<class GM, class IT1, class IT2>
-         double loss(const GM & gm, IT1 labelBegin, IT1 labelEnd, IT2 GTBegin,IT2 GTEnd) const;
-  
-         template<class GM, class IT>
-         void addLoss(GM& gm, IT GTBegin) const;
-      private:
-         Parameter param_;
-      };
-
-      inline void HammingLoss::Parameter::save(hid_t &groupHandle) const {
-          std::vector<std::size_t> name;
-          name.push_back(this->getLossId());
-          marray::hdf5::save(groupHandle,"lossId",name);
-      }
-
-      template<class GM, class IT1, class IT2>
-      double HammingLoss::loss(const GM & gm, IT1 labelBegin, const IT1 labelEnd, IT2 GTBegin, const IT2 GTEnd) const
-      {
-         double loss = 0.0;
-         for(; labelBegin!= labelEnd; ++labelBegin, ++GTBegin){
-            if(*labelBegin != *GTBegin){
-               loss += 1.0;
-            }
-         }
-         return loss;
-      }
-
-      template<class GM, class IT>
-      void HammingLoss::addLoss(GM& gm, IT gt) const
-      {
-
-         for(typename GM::IndexType i=0; i<gm.numberOfVariables(); ++i){
-            typename GM::LabelType numL = gm.numberOfLabels(i);
-            opengm::ExplicitFunction<typename GM::ValueType,typename GM::IndexType, typename GM::LabelType> f(&numL, &numL+1,-1);
-            f(*gt) = 0;
-            ++gt;
-            gm.addFactor(gm.addFunction(f), &i, &(i)+1);
-         }
-      }
-
-   }  
-} // namespace opengm
-
-#endif 
diff --git a/include/opengm/learning/loss/noloss.hxx b/include/opengm/learning/loss/noloss.hxx
deleted file mode 100644
index e207433..0000000
--- a/include/opengm/learning/loss/noloss.hxx
+++ /dev/null
@@ -1,73 +0,0 @@
-#pragma once
-#ifndef OPENGM_NO_LOSS_HXX
-#define OPENGM_NO_LOSS_HXX
-
-#include "opengm/functions/explicit_function.hxx"
-#include "opengm/graphicalmodel/graphicalmodel_hdf5.hxx"
-
-namespace opengm {
-namespace learning {
-
-    class NoLoss{
-    public:
-        class Parameter{
-        public:
-            bool operator==(const NoLoss & other) const{
-                return true;
-            }
-            bool operator<(const NoLoss & other) const{
-                return false;
-            }
-            bool operator>(const NoLoss & other) const{
-                return false;
-            }
-            /**
-             * serializes the parameter object to the given hdf5 group handle;
-             * save() writes a dataset "lossId" identifying the loss
-             **/
-            void save(hid_t& groupHandle) const;
-            inline void load(const hid_t& ) {}
-            static std::size_t getLossId() { return lossId_; }
-        private:
-            static const std::size_t lossId_ = 0;
-        };
-
-    public:
-        NoLoss(const Parameter& param = Parameter()) 
-        : param_(param){
-
-        }
-
-        template<class GM, class IT1, class IT2>
-        double loss(const GM & gm, IT1 labelBegin, IT1 labelEnd, IT2 GTBegin,IT2 GTEnd) const;
-
-        template<class GM, class IT>
-        void addLoss(GM& gm, IT GTBegin) const;
-    private:
-        Parameter param_;
-
-    };
-
-    inline void NoLoss::Parameter::save(hid_t& groupHandle) const {
-        std::vector<std::size_t> name;
-        name.push_back(this->getLossId());
-        marray::hdf5::save(groupHandle,"lossId",name);
-    }
-
-    template<class GM, class IT1, class IT2>
-    double NoLoss::loss(const GM & gm, IT1 labelBegin, const IT1 labelEnd, IT2 GTBegin, const IT2 GTEnd) const
-    {
-        double loss = 0.0;
-        return loss;
-    }
-
-    template<class GM, class IT>
-    void NoLoss::addLoss(GM& gm, IT gt) const
-    {
-    }
-
-}  
-} // namespace opengm
-
-#endif 
diff --git a/include/opengm/learning/maximum-likelihood-learning.hxx b/include/opengm/learning/maximum-likelihood-learning.hxx
deleted file mode 100644
index 3bac158..0000000
--- a/include/opengm/learning/maximum-likelihood-learning.hxx
+++ /dev/null
@@ -1,310 +0,0 @@
-#pragma once
-#ifndef OPENGM_MAXIMUM_LIKELIHOOD_LEARNER_HXX
-#define OPENGM_MAXIMUM_LIKELIHOOD_LEARNER_HXX
-
-#include <vector>
-#include <fstream>
-#include <opengm/inference/messagepassing/messagepassing.hxx>
-#include <opengm/functions/view_convert_function.hxx>
-#include <opengm/graphicalmodel/graphicalmodel.hxx>
-
-
-namespace opengm {
-namespace learning {
-
-template<class IT> 
-class WeightGradientFunctor{
-public:
-   WeightGradientFunctor(size_t weightIndex, IT labelVectorBegin) //std::vector<size_t>::iterator labelVectorBegin)
-        : weightIndex_(weightIndex),
-          labelVectorBegin_(labelVectorBegin){
-    }
-
-    template<class F>
-    void operator()(const F & function ){
-        // signed index so the "not found" sentinel -1 is well defined
-        std::ptrdiff_t index=-1;
-        for(size_t i=0; i<function.numberOfWeights();++i)
-            if(function.weightIndex(i)==weightIndex_)
-                index=i;
-        if(index!=-1)
-            result_ = function.weightGradient(index, labelVectorBegin_);
-        else
-            result_ = 0;
-    }
-
-    size_t weightIndex_;
-    IT  labelVectorBegin_;
-    double result_;
-};
-
-template<class DATASET>
-class MaximumLikelihoodLearner
-{
-public:
-    typedef DATASET DatasetType;
-    typedef typename DATASET::GMType   GMType;
-    typedef typename GMType::ValueType ValueType;
-    typedef typename GMType::IndexType IndexType;
-    typedef typename GMType::LabelType LabelType;
-    typedef typename GMType::FactorType FactorType;
-    typedef opengm::learning::Weights<ValueType> WeightType;  
-
-    typedef typename opengm::ExplicitFunction<ValueType,IndexType,LabelType> FunctionType;
-    typedef typename opengm::ViewConvertFunction<GMType,Minimizer,ValueType> ViewFunctionType;
-    typedef typename GMType::FunctionIdentifier FunctionIdentifierType;
-    typedef typename opengm::meta::TypeListGenerator<FunctionType,ViewFunctionType>::type FunctionListType;
-    typedef opengm::GraphicalModel<ValueType,opengm::Multiplier, FunctionListType, opengm::DiscreteSpace<IndexType,LabelType> > GmBpType;
-    typedef BeliefPropagationUpdateRules<GmBpType, opengm::Integrator> UpdateRules;
-    typedef MessagePassing<GmBpType, opengm::Integrator, UpdateRules, opengm::MaxDistance> BeliefPropagation;
-   
-    class Parameter{
-    public:
-       size_t maxNumSteps_;
-       Parameter() :
-          maxNumSteps_(100)
-          {;}
-    };
-   
-
-    MaximumLikelihoodLearner(DATASET&, const Parameter & w= Parameter() );
-
-   //  template<class INF>
-   void learn();//const typename INF::Parameter&);
-
-    const opengm::learning::Weights<ValueType>& getModelWeights(){return modelWeights_;}
-    Parameter& getLearningWeights(){return param_;}
-
-private:
-    DATASET& dataset_;
-    opengm::learning::Weights<ValueType> modelWeights_;
-    Parameter param_;
-};
-
-template<class DATASET>
-MaximumLikelihoodLearner<DATASET>::MaximumLikelihoodLearner(DATASET& ds, const Parameter& w)
-    : dataset_(ds), param_(w)
-{
-    modelWeights_ = opengm::learning::Weights<ValueType>(ds.getNumberOfWeights());
-}
-
-
-template<class DATASET>
-//template<class INF>
-void MaximumLikelihoodLearner<DATASET>::learn(){//const typename INF::Parameter &infParam){
-
-    opengm::learning::Weights<ValueType> modelWeight( dataset_.getNumberOfWeights() );
-    opengm::learning::Weights<ValueType> bestModelWeight( dataset_.getNumberOfWeights() );
-    std::vector<ValueType> point(dataset_.getNumberOfWeights(),0);
-    std::vector<ValueType> gradient(dataset_.getNumberOfWeights(),0);
-
-
-    typename DATASET::LossType lossFunction;
-    bool search=true;
-    int count=0;
-
-    std::vector< std::vector<ValueType> > w( dataset_.getNumberOfModels(), std::vector<ValueType> ( dataset_.getModel(0).numberOfVariables()) );
-
-    /***********************************************************************************************************/
-    // construct Ground Truth dependent weights
-    /***********************************************************************************************************/
-
-    for(IndexType m=0; m<dataset_.getNumberOfModels(); ++m){ // for each model
-        const GMType &model = dataset_.getModel(m);
-        const std::vector<LabelType>& gt =  dataset_.getGT(m);
-
-        for(IndexType v=0; v<model.numberOfVariables();++v)
-            w[m][v]=(ValueType)gt[v];
-    }
-
-    ValueType eta = 0.1;
-    ValueType optFun, bestOptFun=0.0;
-
-    while(search){
-        ++count;
-        //if (count % 1000 == 0)
-        std::cout << "---count--->" << count << "     ";
-
-        // Get Weights
-        for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
-            modelWeight.setWeight(p, point[p]);
-        }
-
-        // /***********************************************************************************************************/
-        // // calculate current loss - not needed
-        // /***********************************************************************************************************/
-        // opengm::learning::Weights<ValueType>& mp =  dataset_.getWeights();
-        // mp = modelWeight;
-        // std::vector< std::vector<typename INF::LabelType> > confs( dataset_.getNumberOfModels() );
-        // double loss = 0;
-        // for(size_t m=0; m<dataset_.getNumberOfModels(); ++m){
-        //    INF inf( dataset_.getModel(m),infParam);
-        //    inf.infer();
-        //    inf.arg(confs[m]);
-        //    const std::vector<typename INF::LabelType>& gt =  dataset_.getGT(m);
-        //    loss += lossFunction.loss(dataset_.getModel(m), confs[m].begin(), confs[m].end(), gt.begin(), gt.end());
-        // }
-
-        // std::cout << " eta = " << eta << "   weights  ";//<< std::endl;
-        // for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
-        //     std::cout << modelWeight[p] << " " ;
-        // }
-
-        // optFun=0.0;
-
-        /***********************************************************************************************************/
-        // Loopy Belief Propagation setup
-        /***********************************************************************************************************/
-     
-
-        const IndexType maxNumberOfIterations = 40;
-        const double convergenceBound = 1e-7;
-        const double damping = 0.5;
-        typename BeliefPropagation::Parameter bpParam(maxNumberOfIterations, convergenceBound, damping);
-
-        std::vector< std::vector<ValueType> > b  ( dataset_.getNumberOfModels(), std::vector<ValueType> ( dataset_.getModel(0).numberOfFactors()) );
-
-        for(IndexType m=0; m<dataset_.getNumberOfModels(); ++m){
-
-           //****************************************
-           // Build dummy model
-           //***************************************
-            GmBpType bpModel(dataset_.getModel(m).space());
-
-            for(IndexType f = 0; f<dataset_.getModel(m).numberOfFactors();++f){
-                const typename GMType::FactorType& factor=dataset_.getModel(m)[f];
-                typedef typename opengm::ViewConvertFunction<GMType,Minimizer,ValueType> ViewFunctionType;
-                typedef typename GMType::FunctionIdentifier FunctionIdentifierType;
-                FunctionIdentifierType fid = bpModel.addFunction(ViewFunctionType(factor));
-                bpModel.addFactor(fid, factor.variableIndicesBegin(), factor.variableIndicesEnd());
-            }
-            /***********************************************************************************************************/
-            // run: Loopy Belief Propagation
-            /***********************************************************************************************************/
-            BeliefPropagation bp(bpModel, bpParam);
-            const std::vector<LabelType>& gt =  dataset_.getGT(m);
-            bp.infer();
-            typename GMType::IndependentFactorType marg;
-
-            for(IndexType f = 0; f<dataset_.getModel(m).numberOfFactors();++f){
-                bp.factorMarginal(f, marg);
-                std::vector<IndexType> indexVector( marg.variableIndicesBegin(), marg.variableIndicesEnd() );
-                std::vector<LabelType> labelVector( marg.numberOfVariables());
-                for(IndexType v=0; v<marg.numberOfVariables();++v)
-                    labelVector[v] = gt[indexVector[v]];
-                b[m][f] = marg(labelVector.begin());
-            }
-        }
-
-        /***********************************************************************************************************/
-        // Calculate Gradient
-        /***********************************************************************************************************/
-        std::vector<ValueType> sum(dataset_.getNumberOfWeights());
-        for(IndexType p=0; p<dataset_.getNumberOfWeights();++p){
-            std::vector< std::vector<ValueType> >
-                piW(dataset_.getNumberOfModels(),
-                    std::vector<ValueType> ( dataset_.getModel(0).numberOfFactors()));
-
-            for(IndexType m=0; m<dataset_.getNumberOfModels(); ++m){
-                const GMType &model = dataset_.getModel(m);
-                const std::vector<LabelType>& gt =  dataset_.getGT(m);
-                ValueType f_p;
-
-                for(IndexType f=0; f<dataset_.getModel(m).numberOfFactors();++f){
-                    const FactorType &factor = dataset_.getModel(m)[f];
-                    std::vector<IndexType> indexVector( factor.variableIndicesBegin(), factor.variableIndicesEnd() );
-                    std::vector<LabelType> labelVector( factor.numberOfVariables());
-                    piW[m][f]=1.0;
-
-                    for(IndexType v=0; v<factor.numberOfVariables();++v){
-                        labelVector[v] = gt[indexVector[v]];
-                        piW[m][f] *=w[m][indexVector[v]];
-                    }
-                    WeightGradientFunctor<typename std::vector<LabelType>::iterator> weightGradientFunctor(p, labelVector.begin());
-                    factor.callFunctor(weightGradientFunctor);
-                    f_p =weightGradientFunctor.result_;
-
-                    // gradient
-                    // ( marginals - ground_truth ) * factor_gradient_p
-                    sum[p] += (b[m][f] - piW[m][f]) * f_p;
-
-                    // likelihood function
-                    // marginals - ground_truth * factor
-                    optFun += b[m][f] - piW[m][f] * factor(labelVector.begin());
-                }
-            }
-        }
-        //std::cout << " loss = " << loss << " optFun = " << optFun << " optFunTmp = " << optFunTmp << std::endl;
-        //std::cout << " loss = " << loss << " optFun = " << optFun << std::endl; 
-        std::cout << " optFun = " << optFun << std::endl;
-
-        if(optFun>=bestOptFun){
-            bestOptFun=optFun;
-            bestModelWeight=modelWeight;
-        }
-
-        if (count>=param_.maxNumSteps_){
-            search = false;
-        }else{
-            // Calculate the next point
-            ValueType norm2=0.0;
-            for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
-                gradient[p] = sum[p];
-                norm2 += gradient[p]*gradient[p];
-            }
-            norm2 = std::sqrt(norm2);
-            for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
-                gradient[p] /= norm2;
-                std::cout << " gradient [" << p << "] = " << gradient[p] << std::endl;
-                point[p] += eta * gradient[p];
-
-            }
-            eta *= (ValueType)count/(count+1);
-        }
-    } // end while search
-
-    std::cout <<std::endl<< "Best weights: ";
-    for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
-        std::cout << bestModelWeight[p] <<" ";
-    }
-    std::cout << " ==> ";
-    //std::cout << " loss = " << bestLoss << " bestOptFun = " << bestOptFun << " gradient [" << 0 << "] = " << gradient[0] << std::endl;
-    std::cout << " bestOptFun = " << bestOptFun << " gradient [" << 0 << "] = " << gradient[0] << std::endl;
-
-    modelWeights_ = bestModelWeight;
-}
-}
-}
-#endif
-
-
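
Note: the accumulation `(b[m][f] - piW[m][f]) * f_p` above approximates the textbook likelihood-gradient identity for exponential-family models, in which the derivative with respect to a weight is the difference between the observed and the expected sufficient statistic,

    \frac{\partial \log p(y^{gt})}{\partial w_p}
        = \sum_f \left( \frac{\partial \theta_f}{\partial w_p}(y^{gt}_f)
          - \mathbb{E}_{b_f}\!\left[ \frac{\partial \theta_f}{\partial w_p} \right] \right),

with the intractable model expectation replaced by the loopy-BP factor marginal b_f; the sign depends on whether \theta_f is treated as an energy or a log-potential (here via ViewConvertFunction). The step size eta decays roughly like 1/t through the multiplicative update eta *= t/(t+1).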
diff --git a/include/opengm/learning/maximum_likelihood_learning.hxx b/include/opengm/learning/maximum_likelihood_learning.hxx
deleted file mode 100644
index d8c54b2..0000000
--- a/include/opengm/learning/maximum_likelihood_learning.hxx
+++ /dev/null
@@ -1,238 +0,0 @@
-#pragma once
-#ifndef OPENGM_MAXIMUM_LIKELIHOOD_LEARNING_HXX
-#define OPENGM_MAXIMUM_LIKELIHOOD_LEARNING_HXX
-
-#include <vector>
-#include <fstream>
-#include <opengm/graphicalmodel/graphicalmodel.hxx>
-#include <opengm/inference/messagepassing/messagepassing.hxx>
-#include <opengm/functions/view_convert_function.hxx>
-#include <iomanip>
-
-namespace opengm {
-   namespace learning {
-
-      template<class DATASET>
-      class MaximumLikelihoodLearner
-      {
-      public:
-         typedef DATASET                     DatasetType;
-         typedef typename DATASET::GMType    GMType;
-         typedef typename GMType::ValueType  ValueType;
-         typedef typename GMType::IndexType  IndexType;
-         typedef typename GMType::LabelType  LabelType;
-         typedef typename GMType::FactorType FactorType;
-         typedef Weights<ValueType>          WeightType;  
-
-         class Parameter{
-         public:
-	     size_t maximumNumberOfIterations_;
-	     double gradientStepSize_;
-	     double weightStoppingCriteria_;
-             double gradientStoppingCriteria_;
-             bool infoFlag_;
-             bool infoEveryStep_; 
-             double weightRegularizer_;
-	     size_t beliefPropagationMaximumNumberOfIterations_;
-	     double beliefPropagationConvergenceBound_;
-	     double beliefPropagationDamping_;
-	     double beliefPropagationTemperature_;
-	     opengm::Tribool beliefPropagationIsAcyclic_;
-	     Parameter():
-	         maximumNumberOfIterations_(100),
-	         gradientStepSize_(0.1),
-		 weightStoppingCriteria_(0.0000000000000001),
-		 gradientStoppingCriteria_(0.0000000000000001),
-		 infoFlag_(true),
-		 infoEveryStep_(false),
-		 weightRegularizer_(1.0),
-		 beliefPropagationMaximumNumberOfIterations_(40),
-		 beliefPropagationConvergenceBound_(0.0000001),
-		 beliefPropagationDamping_(0.5),
-		 beliefPropagationTemperature_(0.3),
-		 beliefPropagationIsAcyclic_(opengm::Tribool::Maybe)
-
-	   {;}
-         };
-
-         class WeightGradientFunctor{
-         public:
-            WeightGradientFunctor(DatasetType& ds) : dataset_(ds) { gradient_.resize(ds.getNumberOfWeights(),0.0);}
-            void setModel(size_t m) { modelID_ = m; } 
-            void setMarg(typename GMType::IndependentFactorType* marg){marg_= marg;}
-            double getGradient(size_t i) {return gradient_[i];}
-            
-            template<class F>
-            void operator()(const F & function ){
-               std::vector<LabelType> labelVector(marg_->numberOfVariables());
-               for(size_t i=0; i<marg_->numberOfVariables(); ++i)
-                  labelVector[i] = dataset_.getGT(modelID_)[marg_->variableIndex(i)]; 
-               for(size_t i=0; i<function.numberOfWeights();++i){
-		  size_t wID = function.weightIndex(i);
-                  gradient_[wID] -= function.weightGradient(i, labelVector.begin());
-               } 
-               
-               opengm::ShapeWalker<typename F::FunctionShapeIteratorType> shapeWalker(function.functionShapeBegin(), function.dimension());
-               for(size_t i=0;i<function.size();++i, ++shapeWalker) {                   
-                  for(size_t i=0; i<function.numberOfWeights();++i){
-                     size_t wID = function.weightIndex(i);
-                     gradient_[wID] += (*marg_)(shapeWalker.coordinateTuple().begin()) * function.weightGradient(i, shapeWalker.coordinateTuple().begin() );
-                  }
-               }              
-            }
-            
-         private:
-            DatasetType&                            dataset_;
-            size_t                                  modelID_;
-            std::vector<double>                     gradient_;  
-            typename GMType::IndependentFactorType* marg_;
-         };
-         
-         MaximumLikelihoodLearner(DATASET&, const Parameter&);
-
-	 void learn();
-         
-         const opengm::learning::Weights<ValueType>& getModelWeights(){return weights_;}
-         WeightType& getLearningWeights(){return weights_;}
-
-      private:
-         DATASET&     dataset_;
-         WeightType   weights_;
-         Parameter    param_;
-      }; 
-
-      template<class DATASET>
-      MaximumLikelihoodLearner<DATASET>::MaximumLikelihoodLearner(DATASET& ds, const Parameter& param )
-         : dataset_(ds), param_(param)
-      {
-          weights_ = opengm::learning::Weights<ValueType>(ds.getNumberOfWeights());
-      }
-
-      template<class DATASET>
-      void MaximumLikelihoodLearner<DATASET>::learn(){
-
-         typedef typename opengm::ExplicitFunction<ValueType,IndexType,LabelType>                                                    FunctionType;
-         typedef typename opengm::ViewConvertFunction<GMType,Minimizer,ValueType>                                                    ViewFunctionType;
-         typedef typename GMType::FunctionIdentifier                                                                                 FunctionIdentifierType;
-         typedef typename opengm::meta::TypeListGenerator<FunctionType,ViewFunctionType>::type                                       FunctionListType;
-         typedef opengm::GraphicalModel<ValueType,opengm::Multiplier, FunctionListType, opengm::DiscreteSpace<IndexType,LabelType> > GmBpType;
-         typedef BeliefPropagationUpdateRules<GmBpType, opengm::Integrator>                                                          UpdateRules;
-         typedef MessagePassing<GmBpType, opengm::Integrator, UpdateRules, opengm::MaxDistance>                                      BeliefPropagation;
-         
-         bool search = true; 
-         double invTemperature = 1.0/param_.beliefPropagationTemperature_;
-
-         if(param_.infoFlag_){
-	     std::cout << "INFO: Maximum Likelihood Learner: Maximum Number Of Iterations "<< param_.maximumNumberOfIterations_ << std::endl;
-	     std::cout << "INFO: Maximum Likelihood Learner: Gradient Step Size "<< param_.gradientStepSize_ << std::endl;
-	     std::cout << "INFO: Maximum Likelihood Learner: Gradient Stopping Criteria "<<param_. gradientStoppingCriteria_ << std::endl;
-	     std::cout << "INFO: Maximum Likelihood Learner: Weight Stopping Criteria "<< param_.weightStoppingCriteria_ << std::endl;
-	     std::cout << "INFO: Maximum Likelihood Learner: Info Flag "<< param_.infoFlag_ << std::endl;
-	     std::cout << "INFO: Maximum Likelihood Learner: Info Every Step "<< param_.infoEveryStep_ << std::endl;
-	     std::cout << "INFO: Maximum Likelihood Learner: Strength of regularizer for the Weight "<< param_.weightRegularizer_ << std::endl;
-	     std::cout << "INFO: Belief Propagation: Maximum Number Of Belief Propagation Iterations "<< param_.beliefPropagationMaximumNumberOfIterations_ << std::endl;
-	     std::cout << "INFO: Belief Propagation: Convergence Bound "<< param_.beliefPropagationConvergenceBound_ << std::endl;
-	     std::cout << "INFO: Belief Propagation: Damping "<< param_.beliefPropagationDamping_ << std::endl;
-	     std::cout << "INFO: Belief Propagation: Temperature "<< param_.beliefPropagationTemperature_ << std::endl;
-	     std::cout << "INFO: Belief Propagation: Acyclic Model "<< param_.beliefPropagationIsAcyclic_ << std::endl;
-	 }
-
-	 typename UpdateRules::SpecialParameterType specialParameter;//=UpdateRules::SpecialParameterType();
-         typename BeliefPropagation::Parameter infParam(
-	     param_.beliefPropagationMaximumNumberOfIterations_, 
-	     param_.beliefPropagationConvergenceBound_, 
-	     param_.beliefPropagationDamping_,
-	     specialParameter,
-	     param_.beliefPropagationIsAcyclic_
-	 );
-
-         size_t iterationCount = 0;
-         while(search){
-            if(iterationCount>=param_.maximumNumberOfIterations_) break;
-            ++iterationCount;
-	    if(param_.infoFlag_)
-	        std::cout << "\r Progress :  " << iterationCount << "/"<<param_.maximumNumberOfIterations_ <<" iteration     0/"<< dataset_.getNumberOfModels() << " models ";
-
-            typename GMType::IndependentFactorType marg;
-            WeightGradientFunctor wgf(dataset_); 
-
-            for(IndexType m=0; m<dataset_.getNumberOfModels(); ++m){ 
-	       if(param_.infoFlag_)
-                  std::cout << "\r Progress :  " << iterationCount << "/"<<param_.maximumNumberOfIterations_ << " iteration     "<<m<<"/"<< dataset_.getNumberOfModels()<<" models ";
-
-               dataset_.lockModel(m);
-               wgf.setModel(m);
-
-               //*********************************
-               //** Build dummy model and infer
-               //*********************************
-               GmBpType bpModel(dataset_.getModel(m).space());
-               for(IndexType f = 0; f<dataset_.getModel(m).numberOfFactors();++f){
-                  const typename GMType::FactorType& factor=dataset_.getModel(m)[f];
-                  typedef typename opengm::ViewConvertFunction<GMType,Minimizer,ValueType> ViewFunctionType;
-                  typedef typename GMType::FunctionIdentifier FunctionIdentifierType;
-                  FunctionIdentifierType fid = bpModel.addFunction(ViewFunctionType(factor,invTemperature));
-                  bpModel.addFactor(fid, factor.variableIndicesBegin(), factor.variableIndicesEnd());
-               } 
-
-               BeliefPropagation bp(bpModel, infParam);
-               bp.infer();
-               for(IndexType f=0; f<dataset_.getModel(m).numberOfFactors();++f){
-                  bp.factorMarginal(f, marg);
-                  
-                  
-                  wgf.setMarg(&marg);
-                  dataset_.getModel(m)[f].callFunctor(wgf);
-               }
-               dataset_.unlockModel(m);
-
-            }
-
-
-            //*****************************
-            //** Gradient Step
-            //************************
-            double gradientNorm = 0;
-            for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
-               gradientNorm += (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p)) * (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p));
-            }
-            gradientNorm = std::sqrt(gradientNorm);
-
-	    if(gradientNorm < param_.gradientStoppingCriteria_)
-	        search = false;
-
-	    if(param_.infoFlag_ and param_.infoEveryStep_)
-	        std::cout << "\r" << std::flush << " Iteration " << iterationCount <<" Gradient = ( ";
-
-	    double normGradientDelta = 0;
-            for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
-	        if(param_.infoFlag_ and param_.infoEveryStep_)
-		    std::cout << std::left << std::setfill(' ') << std::setw(10) << (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p))/gradientNorm << " ";
-
-		double gradientDelta;
-		gradientDelta=param_.gradientStepSize_/iterationCount * (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p))/gradientNorm;
-
-		normGradientDelta +=gradientDelta*gradientDelta;
-                dataset_.getWeights().setWeight(p, weights_.getWeight(p) + gradientDelta);
-                weights_.setWeight(p, weights_.getWeight(p) + gradientDelta); 
-            }
-	    normGradientDelta=std::sqrt(normGradientDelta);
-	    if( normGradientDelta < param_.weightStoppingCriteria_)
-	        search = false;
-
-	    if(param_.infoFlag_ and param_.infoEveryStep_){
-                std::cout << ") ";
-                std::cout << " Weight = ( ";
-                for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p)
-                    std::cout << std::left << std::setfill(' ') << std::setw(10) <<  weights_.getWeight(p) << " ";
-                std::cout << ") "<< "GradientNorm " << std::left << std::setfill(' ') << std::setw(10) << gradientNorm << " GradientDeltaNorm "<< std::setw(10) << normGradientDelta << "             " << std::endl;
-	    }
-	    else if (param_.infoFlag_)
-	      std::cout << "GradientNorm " << std::left << std::setfill(' ') << std::setw(10) << gradientNorm << " GradientDeltaNorm "<< std::setw(10) << normGradientDelta << "             " << std::flush;
-         }
-	 std::cout << "\r                                                                                                                                                                                                                                                                                                                                                                                                            " << std::flush;
-         std::cout << "\r Stoped after "<< iterationCount  << "/" << param_.maximumNumberOfIterations_<< " iterations. " <<std::endl;
-      }
-   }
-}
-#endif
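
Note: the weight update above is a normalized gradient step on the L2-regularized likelihood: the raw gradient g is shifted by -2*lambda*w, normalized, and applied with a 1/t step-size decay. A self-contained sketch of that step (regularizedStep and its parameter names are illustrative, not library API):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // one normalized, L2-regularized ascent step with 1/t decay (sketch)
    void regularizedStep(std::vector<double>& w, const std::vector<double>& g,
                         double eta, double lambda, std::size_t t)
    {
        std::vector<double> d(w.size());
        double norm = 0.0;
        for(std::size_t p = 0; p < w.size(); ++p){
            d[p] = g[p] - 2.0 * lambda * w[p];   // gradient of likelihood - lambda*||w||^2
            norm += d[p] * d[p];
        }
        norm = std::sqrt(norm);
        if(norm == 0.0) return;                  // stationary point: nothing to do
        for(std::size_t p = 0; p < w.size(); ++p)
            w[p] += (eta / double(t)) * d[p] / norm;
    }

Normalizing the direction makes the decaying step size eta/t the sole control on progress, which is what the gradientStoppingCriteria_/weightStoppingCriteria_ pair above monitors.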
diff --git a/include/opengm/learning/rws.hxx b/include/opengm/learning/rws.hxx
deleted file mode 100644
index 42c7cd0..0000000
--- a/include/opengm/learning/rws.hxx
+++ /dev/null
@@ -1,286 +0,0 @@
-#pragma once
-#ifndef OPENGM_RWS_LEARNER_HXX
-#define OPENGM_RWS_LEARNER_HXX
-
-#include <iomanip>
-#include <vector>
-#include <ctime>
-#include <opengm/inference/inference.hxx>
-#include <opengm/graphicalmodel/weights.hxx>
-#include <opengm/utilities/random.hxx>
-#include <opengm/learning/gradient-accumulator.hxx>
-#include <opengm/learning/weight_averaging.hxx>
-
-#ifdef WITH_OPENMP
-#include <omp.h>
-#endif
-
-#include <boost/circular_buffer.hpp>
-#include <boost/math/distributions/normal.hpp>
-#include <boost/random/normal_distribution.hpp>
-#include <boost/random/mersenne_twister.hpp>
-#include <boost/random/variate_generator.hpp>
-
-
-namespace opengm {
-    namespace learning {
-
-
-
-    template<class T>
-    double gen_normal_3(T &generator)
-    {
-      return generator();
-    }
-
-    // Version that fills a vector
-    template<class T>
-    void gen_normal_3(T &generator,
-                  std::vector<double> &res)
-    {
-      for(size_t i=0; i<res.size(); ++i)
-        res[i]=generator();
-    }
-
-
-           
-    template<class DATASET>
-    class Rws
-    {
-    public: 
-        typedef DATASET DatasetType;
-        typedef typename DATASET::GMType   GMType; 
-        typedef typename DATASET::GMWITHLOSS GMWITHLOSS;
-        typedef typename DATASET::LossType LossType;
-        typedef typename GMType::ValueType ValueType;
-        typedef typename GMType::IndexType IndexType;
-        typedef typename GMType::LabelType LabelType; 
-        typedef opengm::learning::Weights<double> WeightsType;
-        typedef typename std::vector<LabelType>::const_iterator LabelIterator;
-        typedef FeatureAccumulator<GMType, LabelIterator> FeatureAcc;
-
-        typedef std::vector<LabelType> ConfType;
-        typedef boost::circular_buffer<ConfType> ConfBuffer;
-        typedef std::vector<ConfBuffer> ConfBufferVec;
-
-        class Parameter{
-        public:
-
-
-
-            Parameter(){
-                eps_ = 0.00001;
-                maxIterations_ = 10000;
-                stopLoss_ = 0.0;
-                learningRate_ = 1.0;
-                C_ = 1.0;
-                averaging_ = -1;
-                p_ = 10;
-                sigma_ = 1.0;
-            }       
-
-            double eps_;
-            size_t maxIterations_;
-            double stopLoss_;
-            double learningRate_;
-            double C_;
-            int averaging_;
-            size_t p_;
-            double sigma_;
-        };
-
-
-        Rws(DATASET&, const Parameter& );
-
-        template<class INF>
-        void learn(const typename INF::Parameter& para); 
-        //template<class INF, class VISITOR>
-        //void learn(typename INF::Parameter para, VISITOR vis);
-
-        const opengm::learning::Weights<double>& getWeights(){return weights_;}
-        Parameter& getLearningParameters(){return para_;}
-
-
-
-        double getLoss(const GMType & gm, const GMWITHLOSS & gmWithLoss, std::vector<LabelType> & labels){
-
-            double loss = 0;
-            std::vector<LabelType> subConf;
-
-            for(size_t fi=gm.numberOfFactors(); fi<gmWithLoss.numberOfFactors(); ++fi){
-                // size per factor: a fixed-size buffer would break for higher-order loss factors
-                subConf.resize(gmWithLoss[fi].numberOfVariables());
-                for(size_t v=0; v<gmWithLoss[fi].numberOfVariables(); ++v){
-                    subConf[v] = labels[ gmWithLoss[fi].variableIndex(v)];
-                }
-                loss += gmWithLoss[fi](subConf.begin());
-            }
-            return loss;
-        }
-
-    private:
-
-        double updateWeights();
-
-        DATASET& dataset_;
-        WeightsType  weights_;
-        Parameter para_;
-        size_t iteration_;
-        FeatureAcc featureAcc_;
-        WeightRegularizer<ValueType> wReg_;
-        WeightAveraging<double> weightAveraging_;
-    }; 
-
-    template<class DATASET>
-    Rws<DATASET>::Rws(DATASET& ds, const Parameter& p )
-    :   dataset_(ds), 
-        para_(p),
-        iteration_(0),
-        featureAcc_(ds.getNumberOfWeights()),
-        wReg_(2, 1.0/p.C_),
-        weightAveraging_(ds.getWeights(),p.averaging_)
-    {
-        featureAcc_.resetWeights();
-        weights_ = opengm::learning::Weights<double>(ds.getNumberOfWeights());
-    }
-
-
-    template<class DATASET>
-    template<class INF>
-    void Rws<DATASET>::learn(const typename INF::Parameter& para){
-
-
-        const size_t nModels  = dataset_.getNumberOfModels();
-        const size_t nWeights = dataset_.getNumberOfWeights();
-
-        RandomUniform<size_t> randModel(0, nModels);
-        std::vector< std::vector<ValueType> > noiseVecs(para_.p_, std::vector<ValueType>(nWeights));
-        std::vector<ValueType> lossVec(para_.p_);
-
-        std::vector<ValueType> gradient(nWeights);
-
-        boost::variate_generator<boost::mt19937, boost::normal_distribution<> >
-        generator(boost::mt19937(time(0)),boost::normal_distribution<>(0.0, para_.sigma_));
-
-        std::cout<<"online mode "<<nWegihts<<"\n";
-
-        std::cout <<"start loss"<< std::setw(6) << std::setfill(' ') << iteration_ << ':'
-                          << std::setw(8) << dataset_. template getTotalLossParallel<INF>(para) <<"  \n\n\n\n";
-
-
-        for(iteration_=0 ; iteration_<para_.maxIterations_; ++iteration_){
-
-            // get random model
-            const size_t gmi = randModel();
-
-            // save the current weights
-            WeightsType currentWeights  = dataset_.getWeights();
-
-
-            featureAcc_.resetWeights();
-
-            // lock the model
-            dataset_.lockModel(gmi);
-
-            for(size_t p=0; p<para_.p_; ++p){
-
-
-                // fill noise 
-                gen_normal_3(generator, noiseVecs[p]);
-
-                // add noise to the weights
-                for(size_t wi=0; wi<nWeights; ++wi){
-                    const ValueType cw = currentWeights[wi];
-                    const ValueType nw = cw + noiseVecs[p][wi];
-                    dataset_.getWeights().setWeight(wi, nw);
-                }
-
-
-                const GMType & gm = dataset_.getModel(gmi);
-                // do inference
-                std::vector<LabelType> arg;
-                opengm::infer<INF>(gm, para, arg);
-                lossVec[p] = dataset_.getLoss(arg, gmi);
-                
-                //featureAcc_.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), arg.begin());
-                // update weights
-                //const double wChange =updateWeights();      
-            }
-
-            //for(size_t wi=0; wi<nWegihts; ++wi){
-            //    gradient[wi] = featureAcc_.getWeight(wi);
-            //}
-            // zeroth-order gradient estimate: average the noise directions,
-            // each weighted by the loss it produced
-            std::fill(gradient.begin(), gradient.end(), 0.0);
-            for(size_t p=0; p<para_.p_; ++p){
-                for(size_t wi=0; wi<nWeights; ++wi){
-                    gradient[wi] += (1.0/para_.p_)*(noiseVecs[p][wi])*lossVec[p];
-                }
-            }
-
-            const ValueType actualLearningRate = para_.learningRate_/(1.0 + iteration_);
-            //const ValueType actualLearningRate = para_.learningRate_;///(1.0 + iteration_);
-            // do update
-            for(size_t wi=0; wi<nWeights; ++wi){
-                const ValueType oldWeight = currentWeights[wi];
-                // gradient step, scaled by C_ (acts as a multiplicative shrinkage)
-                const ValueType newWeight = (oldWeight - actualLearningRate*gradient[wi])*para_.C_;
-                dataset_.getWeights().setWeight(wi, newWeight);
-            }
-            std::cout<<"\n";
-            dataset_.unlockModel(gmi);
-
-            if(iteration_%10==0){
-            //if(iteration_%nModels*2 == 0 ){
-                std::cout << '\n'
-                          << std::setw(6) << std::setfill(' ') << iteration_ << ':'
-                          << std::setw(8) << dataset_. template getTotalLossParallel<INF>(para) <<"  "<< std::flush;
-
-            }
-
-        }
-  
-        weights_ = dataset_.getWeights();
-    }
-
-
-    template<class DATASET>
-    double Rws<DATASET>::updateWeights(){
-
-        const size_t nWeights = dataset_.getNumberOfWeights();
-
-        WeightsType p(nWeights);
-        WeightsType newWeights(nWeights);
-
-        for(size_t wi=0; wi<nWeights; ++wi){
-            p[wi] =  dataset_.getWeights().getWeight(wi);
-            p[wi] += para_.C_ * featureAcc_.getWeight(wi);
-        }
-
-        double wChange = 0.0;
-
-        for(size_t wi=0; wi<nWeights; ++wi){
-            const double wOld = dataset_.getWeights().getWeight(wi);
-            const double wNew = wOld - (para_.learningRate_/double(iteration_+1))*p[wi];
-            wChange += (wNew - wOld) * (wNew - wOld);
-            newWeights[wi] = wNew;
-        }
-
-        weightAveraging_(newWeights);
-
-
-
-        weights_ = dataset_.getWeights();
-        return wChange;
-    }
-}
-}
-#endif
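
A note on the weight update in the file above: Rws::learn never differentiates the model. It draws p Gaussian perturbations of the current weights, measures the loss of each perturbed model, and uses the loss-weighted average of the noise directions as a gradient estimate. That is a zeroth-order (evolution-strategies style) estimator of the smoothed loss gradient; the 1/sigma^2 scaling the textbook estimator carries is folded into the learning rate here. A minimal standalone sketch of the same scheme on a toy quadratic loss, using only the C++11 standard library (all names illustrative, not part of OpenGM):

    #include <cmath>
    #include <cstdio>
    #include <random>
    #include <vector>

    int main() {
        const std::size_t nWeights = 2, p = 64;
        const double sigma = 0.1, learningRate = 0.5;
        std::vector<double> w(nWeights, 5.0);       // start away from the optimum
        std::mt19937 rng(42);
        std::normal_distribution<double> noise(0.0, sigma);

        // toy loss with its minimum at w = (1, -2)
        auto loss = [](const std::vector<double>& w) {
            return (w[0] - 1.0) * (w[0] - 1.0) + (w[1] + 2.0) * (w[1] + 2.0);
        };

        for (int t = 0; t < 200; ++t) {
            std::vector<double> grad(nWeights, 0.0);
            for (std::size_t k = 0; k < p; ++k) {
                std::vector<double> eps(nWeights), wk(w);
                for (std::size_t i = 0; i < nWeights; ++i) {
                    eps[i] = noise(rng);
                    wk[i] += eps[i];
                }
                const double l = loss(wk);
                // average of noise directions, weighted by the loss they produced
                for (std::size_t i = 0; i < nWeights; ++i)
                    grad[i] += eps[i] * l / double(p);
            }
            // 1/(1+t) step size, as in Rws::learn above; here the 1/sigma^2
            // normalization is made explicit instead of folded into the rate
            for (std::size_t i = 0; i < nWeights; ++i)
                w[i] -= (learningRate / (1.0 + t)) * grad[i] / (sigma * sigma);
        }
        std::printf("w = (%f, %f)\n", w[0], w[1]);  // approaches (1, -2)
        return 0;
    }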
diff --git a/include/opengm/learning/solver/BundleCollector.h b/include/opengm/learning/solver/BundleCollector.h
deleted file mode 100644
index a9bf12c..0000000
--- a/include/opengm/learning/solver/BundleCollector.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef INFERENCE_BUNDLE_COLLECTOR_H__
-#define INFERENCE_BUNDLE_COLLECTOR_H__
-
-#include "LinearConstraints.h"
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-class BundleCollector {
-
-public:
-
-	template <typename ModelWeights>
-	void addHyperplane(const ModelWeights& a, double b);
-
-	const LinearConstraints& getConstraints() const { return _constraints; }
-
-private:
-
-	LinearConstraints _constraints;
-};
-
-template <typename ModelWeights>
-void
-BundleCollector::addHyperplane(const ModelWeights& a, double b) {
-	/*
-	  <w,a> + b ≤  ξ
-	        <=>
-	  <w,a> - ξ ≤ -b
-	*/
-
-	unsigned int dims = a.numberOfWeights();
-
-	LinearConstraint constraint;
-
-	for (unsigned int i = 0; i < dims; i++)
-		constraint.setCoefficient(i, a[i]);
-	constraint.setCoefficient(dims, -1.0);
-	constraint.setRelation(LessEqual);
-	constraint.setValue(-b);
-
-	_constraints.add(constraint);
-}
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_BUNDLE_COLLECTOR_H__
-
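
The transformation in addHyperplane above is the standard epigraph trick of bundle methods: the cutting plane <w,a> + b <= xi is rewritten so that the slack xi becomes an ordinary LP variable, stored at index numberOfWeights(). A self-contained sketch of the same rewriting on plain standard-library containers (the map stands in for LinearConstraint's sparse coefficients; names are illustrative):

    #include <map>
    #include <utility>
    #include <vector>

    // Rewrite <w,a> + b <= xi as <w,a> - xi <= -b. The slack xi is stored as
    // the extra variable with index a.size(); the relation is "<=".
    std::pair<std::map<unsigned int, double>, double>
    hyperplaneToConstraint(const std::vector<double>& a, double b) {
        std::map<unsigned int, double> coefs;
        for (unsigned int i = 0; i < a.size(); ++i)
            coefs[i] = a[i];          // coefficients of the weights
        coefs[a.size()] = -1.0;       // coefficient of the slack variable xi
        return std::make_pair(coefs, -b);
    }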
diff --git a/include/opengm/learning/solver/CplexBackend.h b/include/opengm/learning/solver/CplexBackend.h
deleted file mode 100644
index feda3f4..0000000
--- a/include/opengm/learning/solver/CplexBackend.h
+++ /dev/null
@@ -1,433 +0,0 @@
-#ifndef OPENGM_LEARNING_SOLVER_CPLEX_SOLVER_H__
-#define OPENGM_LEARNING_SOLVER_CPLEX_SOLVER_H__
-
-#ifdef WITH_CPLEX
-
-#include <string>
-#include <vector>
-
-#include <ilcplex/ilocplex.h>
-
-#include "LinearConstraints.h"
-#include "QuadraticObjective.h"
-#include "QuadraticSolverBackend.h"
-#include "Sense.h"
-#include "Solution.h"
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-/**
- * Cplex interface to solve the following (integer) quadratic program:
- *
- * min  <a,x> + xQx
- * s.t. Ax  == b
- *      Cx  <= d
- *      optionally: x_i \in {0,1} for all i
- *
- * Where (A,b) describes all linear equality constraints, (C,d) all linear
- * inequality constraints and x is the solution vector. a is a real-valued
- * vector denoting the coefficients of the objective and Q a PSD matrix giving
- * the quadratic coefficients of the objective.
- */
-class CplexBackend : public QuadraticSolverBackend {
-
-public:
-
-    struct Parameter {
-
-        Parameter() :
-            mipGap(0.0001),
-            mipFocus(0),
-            numThreads(0),
-            verbose(false) {}
-
-        // The CPLEX relative optimality gap.
-        double mipGap;
-
-        // The MIP emphasis (same encoding as the Gurobi MIP focus): 0 =
-        // balanced, 1 = feasible solutions, 2 = optimal solution, 3 = bound.
-        unsigned int mipFocus;
-
-        // The number of threads to be used by CPLEX. The default (0) uses all
-        // available CPUs.
-        unsigned int numThreads;
-
-        // Show the CPLEX output.
-        bool verbose;
-    };
-
-    CplexBackend(const Parameter& parameter = Parameter());
-
-    virtual ~CplexBackend();
-
-    ///////////////////////////////////
-    // solver backend implementation //
-    ///////////////////////////////////
-
-    void initialize(
-            unsigned int numVariables,
-            VariableType variableType);
-
-    void initialize(
-            unsigned int                                numVariables,
-            VariableType                                defaultVariableType,
-            const std::map<unsigned int, VariableType>& specialVariableTypes);
-
-    void setObjective(const LinearObjective& objective);
-
-    void setObjective(const QuadraticObjective& objective);
-
-    void setConstraints(const LinearConstraints& constraints);
-
-    void addConstraint(const LinearConstraint& constraint);
-
-    bool solve(Solution& solution, double& value, std::string& message);
-
-private:
-
-    //////////////
-    // internal //
-    //////////////
-
-    // set the optimality gap
-    void setMIPGap(double gap);
-
-    // set the MIP focus (the CPLEX MIP emphasis)
-    void setMIPFocus(unsigned int focus);
-
-    // set the number of threads to use
-    void setNumThreads(unsigned int numThreads);
-
-    // create a CPLEX constraint from a linear constraint
-    IloRange createConstraint(const LinearConstraint &constraint);
-
-    /**
-     * Enable solver output.
-     */
-    void setVerbose(bool verbose);
-
-    // size of a and x
-    unsigned int _numVariables;
-
-    // rows in A
-    unsigned int _numEqConstraints;
-
-    // rows in C
-    unsigned int _numIneqConstraints;
-
-    Parameter _parameter;
-
-    // the verbosity of the output
-    int _verbosity;
-
-    // a value by which to scale the objective
-    double _scale;
-
-    // Objective, constraints and cplex environment:
-    IloEnv env_;
-    IloModel model_;
-    IloNumVarArray x_;
-    IloRangeArray c_;
-    IloObjective obj_;
-    IloNumArray sol_;
-    IloCplex cplex_;
-    double constValue_;
-
-    typedef std::vector<IloExtractable> ConstraintVector;
-    ConstraintVector _constraints;
-};
-
-inline CplexBackend::CplexBackend(const Parameter& parameter) :
-    _parameter(parameter),
-    model_(env_),
-    x_(env_),
-    c_(env_),
-    obj_(env_),
-    sol_(env_)
-{
-    std::cout << "constructing cplex solver" << std::endl;
-}
-
-inline CplexBackend::~CplexBackend() {
-    std::cout << "destructing cplex solver..." << std::endl;
-}
-
-inline void
-CplexBackend::initialize(
-        unsigned int numVariables,
-        VariableType variableType) {
-
-    initialize(numVariables, variableType, std::map<unsigned int, VariableType>());
-}
-
-inline void
-CplexBackend::initialize(
-        unsigned int                                numVariables,
-        VariableType                                defaultVariableType,
-        const std::map<unsigned int, VariableType>& specialVariableTypes) {
-
-    _numVariables = numVariables;
-
-    // delete previous variables
-    x_.clear();
-
-    // add new variables to the model
-    if (defaultVariableType == Binary) {
-        std::cout << "creating " << _numVariables << " binary variables" << std::endl;
-        x_.add(IloNumVarArray(env_, _numVariables, 0, 1, ILOBOOL));
-    } else if (defaultVariableType == Continuous) {
-        std::cout << "creating " << _numVariables << " continuous variables" << std::endl;
-        x_.add(IloNumVarArray(env_, _numVariables, -IloInfinity, IloInfinity));
-    } else if (defaultVariableType == Integer) {
-        x_.add(IloNumVarArray(env_, _numVariables, -IloInfinity, IloInfinity, ILOINT));
-    }
-
-    // TODO: port me!
-//    // handle special variable types
-//    typedef std::map<unsigned int, VariableType>::const_iterator VarTypeIt;
-//    for (VarTypeIt i = specialVariableTypes.begin(); i != specialVariableTypes.end(); i++) {
-
-//        unsigned int v = i->first;
-//        VariableType type = i->second;
-
-//        char t = (type == Binary ? 'B' : (type == Integer ? 'I' : 'C'));
-//        _variables[v].set(GRB_CharAttr_VType, t);
-//    }
-
-    std::cout << "creating " << _numVariables << " ceofficients" << std::endl;
-}
-
-inline void
-CplexBackend::setObjective(const LinearObjective& objective) {
-
-    setObjective((QuadraticObjective)objective);
-}
-
-inline void
-CplexBackend::setObjective(const QuadraticObjective& objective) {
-
-    try {
-
-        // set sense of objective
-        if (objective.getSense() == Minimize)
-            obj_ = IloMinimize(env_);
-        else
-            obj_ = IloMaximize(env_);
-
-        // set the constant value of the objective
-        obj_.setConstant(objective.getConstant());
-
-        std::cout << "setting linear coefficients" << std::endl;
-
-        for(size_t i = 0; i < _numVariables; i++)
-        {
-            obj_.setLinearCoef(x_[i], objective.getCoefficients()[i]);
-        }
-
-        // set the quadratic coefficients for all pairs of variables
-        std::cout << "setting quadratic coefficients" << std::endl;
-
-        typedef std::map<std::pair<unsigned int, unsigned int>, double>::const_iterator QuadCoefIt;
-        for (QuadCoefIt i = objective.getQuadraticCoefficients().begin(); i != objective.getQuadraticCoefficients().end(); i++) {
-
-            const std::pair<unsigned int, unsigned int>& variables = i->first;
-            float value = i->second;
-
-            if (value != 0)
-                obj_.setQuadCoef(x_[variables.first], x_[variables.second], value);
-        }
-
-        model_.add(obj_);
-
-    } catch (IloCplex::Exception& e) {
-
-        std::cerr << "CPLEX error: " << e.getMessage() << std::endl;
-    }
-}
-
-inline void
-CplexBackend::setConstraints(const LinearConstraints& constraints) {
-
-    // remove previous constraints
-    for (ConstraintVector::iterator constraint = _constraints.begin(); constraint != _constraints.end(); constraint++)
-        model_.remove(*constraint);
-    _constraints.clear();
-
-    // allocate memory for new constraints
-    _constraints.reserve(constraints.size());
-
-    try {
-        std::cout << "setting " << constraints.size() << " constraints" << std::endl;
-
-        IloExtractableArray cplex_constraints(env_);
-        for (LinearConstraints::const_iterator constraint = constraints.begin(); constraint != constraints.end(); constraint++) {
-            IloRange linearConstraint = createConstraint(*constraint);
-            _constraints.push_back(linearConstraint);
-            cplex_constraints.add(linearConstraint);
-        }
-
-        // add all constraints as batch to the model
-        model_.add(cplex_constraints);
-
-    } catch (IloCplex::Exception& e) {
-
-        std::cerr << "error: " << e.getMessage() << std::endl;
-    }
-}
-
-inline void
-CplexBackend::addConstraint(const LinearConstraint& constraint) {
-
-    try {
-        std::cout << "adding a constraint" << std::endl;
-
-        // add to the model
-        _constraints.push_back(model_.add(createConstraint(constraint)));
-
-    } catch (IloCplex::Exception& e) {
-
-        std::cerr << "error: " << e.getMessage() << std::endl;
-    }
-}
-
-inline IloRange
-CplexBackend::createConstraint(const LinearConstraint& constraint) {
-    // create the lhs expression
-    IloExpr linearExpr(env_);
-
-    // set the coefficients
-    typedef std::map<unsigned int, double>::const_iterator CoefIt;
-    for (CoefIt pair = constraint.getCoefficients().begin(); pair != constraint.getCoefficients().end(); pair++)
-    {
-        linearExpr.setLinearCoef(x_[pair->first], pair->second);
-    }
-
-    switch(constraint.getRelation())
-    {
-        case LessEqual:
-            return IloRange(env_, linearExpr, constraint.getValue());
-        case GreaterEqual:
-            return IloRange(env_, constraint.getValue(), linearExpr);
-        case Equal:
-        default:
-            // equality: lower bound == upper bound
-            return IloRange(env_, constraint.getValue(), linearExpr, constraint.getValue());
-    }
-
-inline bool
-CplexBackend::solve(Solution& x, double& value, std::string& msg) {
-
-    try {
-        cplex_ = IloCplex(model_);
-        setVerbose(_parameter.verbose);
-
-        setMIPGap(_parameter.mipGap);
-
-        if (_parameter.mipFocus <= 3)
-            setMIPFocus(_parameter.mipFocus);
-        else
-            std::cerr << "Invalid value for MIP focus!" << std::endl;
-
-        setNumThreads(_parameter.numThreads);
-        if(!cplex_.solve()) {
-           std::cout << "failed to optimize. " << cplex_.getStatus() << std::endl;
-           msg = "Optimal solution *NOT* found";
-           return false;
-        }
-        else
-            if(_parameter.verbose == true)
-                msg = "Optimal solution found";
-
-        // extract solution
-        cplex_.getValues(sol_, x_);
-        x.resize(_numVariables);
-        for (unsigned int i = 0; i < _numVariables; i++)
-            x[i] = sol_[i];
-
-        // get current value of the objective
-        value = cplex_.getObjValue();
-
-        x.setValue(value);
-
-    } catch (IloCplex::Exception& e) {
-
-        std::cerr << "error: " << e.getMessage() << std::endl;
-
-        msg = e.getMessage();
-
-        return false;
-    }
-
-    return true;
-}
-
-inline void
-CplexBackend::setMIPGap(double gap) {
-     cplex_.setParam(IloCplex::EpGap, gap);
-}
-
-inline void
-CplexBackend::setMIPFocus(unsigned int focus) {
-    /*
-     * GUROBI and CPLEX have the same meaning for the values of the MIPFocus and MIPEmphasis parameter:
-     *
-     * GUROBI docs:
-     * If you are more interested in finding feasible solutions quickly, you can select MIPFocus=1.
-     * If you believe the solver is having no trouble finding good quality solutions,
-     * and wish to focus more attention on proving optimality, select MIPFocus=2.
-     * If the best objective bound is moving very slowly (or not at all), you may want to try MIPFocus=3
-     * to focus on the bound.
-     *
-     * CPLEX params:
-     * switch(focus) {
-        case MIP_EMPHASIS_BALANCED:
-            cplex_.setParam(IloCplex::MIPEmphasis, 0);
-            break;
-        case  MIP_EMPHASIS_FEASIBILITY:
-            cplex_.setParam(IloCplex::MIPEmphasis, 1);
-            break;
-        case MIP_EMPHASIS_OPTIMALITY:
-            cplex_.setParam(IloCplex::MIPEmphasis, 2);
-            break;
-        case MIP_EMPHASIS_BESTBOUND:
-            cplex_.setParam(IloCplex::MIPEmphasis, 3);
-            break;
-        }
-     */
-
-    cplex_.setParam(IloCplex::MIPEmphasis, focus);
-}
-
-inline void
-CplexBackend::setNumThreads(unsigned int numThreads) {
-    cplex_.setParam(IloCplex::Threads, numThreads);
-}
-
-inline void
-CplexBackend::setVerbose(bool verbose) {
-
-    // set up the CPLEX display parameters
-    if (verbose)
-    {
-        cplex_.setParam(IloCplex::MIPDisplay, 1);
-        cplex_.setParam(IloCplex::SimDisplay, 1);
-        cplex_.setParam(IloCplex::SiftDisplay, 1);
-        cplex_.setParam(IloCplex::BarDisplay, 1);
-        cplex_.setParam(IloCplex::NetDisplay, 1);
-    }
-    else
-    {
-        cplex_.setParam(IloCplex::MIPDisplay, 0);
-        cplex_.setParam(IloCplex::SimDisplay, 0);
-        cplex_.setParam(IloCplex::SiftDisplay, 0);
-        cplex_.setParam(IloCplex::BarDisplay, 0);
-        cplex_.setParam(IloCplex::NetDisplay, 0);
-    }
-}
-
-}}} // namespace opengm::learning::solver
-
-#endif // WITH_CPLEX
-
-#endif // OPENGM_LEARNING_SOLVER_CPLEX_SOLVER_H__
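
For readers digging this backend out of the history: the intended call order is initialize, setObjective, setConstraints/addConstraint, solve. A hedged sketch solving min x^2 + y^2 subject to x + y >= 1 over two continuous variables; it only compiles against the removed headers in a tree built with WITH_CPLEX, and the toy problem itself is illustrative:

    #include <iostream>
    #include <string>
    #include "CplexBackend.h"   // the removed header shown above

    using namespace opengm::learning::solver;

    void toyQp() {
        CplexBackend::Parameter param;
        param.verbose = true;

        CplexBackend solver(param);
        solver.initialize(2, Continuous);             // two continuous variables

        QuadraticObjective objective(2);              // min x^2 + y^2
        objective.setSense(Minimize);
        objective.setQuadraticCoefficient(0, 0, 1.0);
        objective.setQuadraticCoefficient(1, 1, 1.0);
        solver.setObjective(objective);

        LinearConstraint constraint;                  // x + y >= 1
        constraint.setCoefficient(0, 1.0);
        constraint.setCoefficient(1, 1.0);
        constraint.setRelation(GreaterEqual);
        constraint.setValue(1.0);
        solver.addConstraint(constraint);

        Solution x;
        double value;
        std::string message;
        if (solver.solve(x, value, message))          // expect x = y = 0.5
            std::cout << message << ": " << x[0] << ", " << x[1] << std::endl;
    }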
diff --git a/include/opengm/learning/solver/GurobiBackend.h b/include/opengm/learning/solver/GurobiBackend.h
deleted file mode 100644
index 2638063..0000000
--- a/include/opengm/learning/solver/GurobiBackend.h
+++ /dev/null
@@ -1,439 +0,0 @@
-#ifndef OPENGM_LEARNING_SOLVER_GUROBI_SOLVER_H__
-#define OPENGM_LEARNING_SOLVER_GUROBI_SOLVER_H__
-
-#ifdef WITH_GUROBI
-
-#include <string>
-#include <vector>
-
-#include <gurobi_c++.h>
-
-#include "LinearConstraints.h"
-#include "QuadraticObjective.h"
-#include "QuadraticSolverBackend.h"
-#include "Sense.h"
-#include "Solution.h"
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-/**
- * Gurobi interface to solve the following (integer) quadratic program:
- *
- * min  <a,x> + xQx
- * s.t. Ax  == b
- *      Cx  <= d
- *      optionally: x_i \in {0,1} for all i
- *
- * Where (A,b) describes all linear equality constraints, (C,d) all linear
- * inequality constraints and x is the solution vector. a is a real-valued
- * vector denoting the coefficients of the objective and Q a PSD matrix giving
- * the quadratic coefficients of the objective.
- */
-class GurobiBackend : public QuadraticSolverBackend {
-
-public:
-
-	struct Parameter {
-
-		Parameter() :
-			mipGap(0.0001),
-			mipFocus(0),
-			numThreads(0),
-			verbose(false) {}
-
-		// The Gurobi relative optimality gap.
-		double mipGap;
-
-		// The Gurobi MIP focus: 0 = balanced, 1 = feasible solutions, 2 = 
-		// optimal solution, 3 = bound.
-		unsigned int mipFocus;
-
-		// The number of threads to be used by Gurobi. The default (0) uses all 
-		// available CPUs.
-		unsigned int numThreads;
-
-		// Show the gurobi output.
-		bool verbose;
-	};
-
-	GurobiBackend(const Parameter& parameter = Parameter());
-
-	virtual ~GurobiBackend();
-
-	///////////////////////////////////
-	// solver backend implementation //
-	///////////////////////////////////
-
-	void initialize(
-			unsigned int numVariables,
-			VariableType variableType);
-
-	void initialize(
-			unsigned int                                numVariables,
-			VariableType                                defaultVariableType,
-			const std::map<unsigned int, VariableType>& specialVariableTypes);
-
-	void setObjective(const LinearObjective& objective);
-
-	void setObjective(const QuadraticObjective& objective);
-
-	void setConstraints(const LinearConstraints& constraints);
-
-	void addConstraint(const LinearConstraint& constraint);
-
-	bool solve(Solution& solution, double& value, std::string& message);
-
-
-private:
-
-	//////////////
-	// internal //
-	//////////////
-
-	// dump the current problem to a file
-	void dumpProblem(std::string filename);
-
-	// set the optimality gap
-	void setMIPGap(double gap);
-
-	// set the MIP focus
-	void setMIPFocus(unsigned int focus);
-
-	// set the number of threads to use
-	void setNumThreads(unsigned int numThreads);
-
-    // create a gurobi constraint from a linear constraint
-    GRBConstr createConstraint(const LinearConstraint &constraint);
-
-	/**
-	 * Enable solver output.
-	 */
-	void setVerbose(bool verbose);
-
-	// size of a and x
-	unsigned int _numVariables;
-
-	// rows in A
-	unsigned int _numEqConstraints;
-
-	// rows in C
-	unsigned int _numIneqConstraints;
-
-	Parameter _parameter;
-
-	// the GRB environment
-	GRBEnv _env;
-
-	// the (binary) variables x
-	GRBVar* _variables;
-
-	// the objective
-	GRBQuadExpr _objective;
-
-	std::vector<GRBConstr> _constraints;
-
-	// the GRB model containing the objective and constraints
-	GRBModel _model;
-
-	// the verbosity of the output
-	int _verbosity;
-
-	// a value by which to scale the objective
-	double _scale;
-};
-
-inline GurobiBackend::GurobiBackend(const Parameter& parameter) :
-	_parameter(parameter),
-	_variables(0),
-	_model(_env) {
-}
-
-inline GurobiBackend::~GurobiBackend() {
-
-	std::cout << "destructing gurobi solver..." << std::endl;
-
-	if (_variables)
-		delete[] _variables;
-}
-
-inline void
-GurobiBackend::initialize(
-		unsigned int numVariables,
-		VariableType variableType) {
-
-	initialize(numVariables, variableType, std::map<unsigned int, VariableType>());
-}
-
-inline void
-GurobiBackend::initialize(
-		unsigned int                                numVariables,
-		VariableType                                defaultVariableType,
-		const std::map<unsigned int, VariableType>& specialVariableTypes) {
-
-	if (_parameter.verbose)
-		setVerbose(true);
-	else
-		setVerbose(false);
-
-	setMIPGap(_parameter.mipGap);
-
-	if (_parameter.mipFocus <= 3)
-		setMIPFocus(_parameter.mipFocus);
-	else
-		std::cerr << "Invalid value for MIP focus!" << std::endl;
-
-	setNumThreads(_parameter.numThreads);
-
-	_numVariables = numVariables;
-
-	// delete previous variables
-	if (_variables)
-		delete[] _variables;
-
-	// add new variables to the model
-	if (defaultVariableType == Binary) {
-
-		std::cout << "creating " << _numVariables << " binary variables" << std::endl;
-
-		_variables = _model.addVars(_numVariables, GRB_BINARY);
-
-		_model.update();
-
-	} else if (defaultVariableType == Continuous) {
-
-		std::cout << "creating " << _numVariables << " continuous variables" << std::endl;
-
-		_variables = _model.addVars(_numVariables, GRB_CONTINUOUS);
-
-		_model.update();
-
-		// remove default lower bound on variables
-		for (unsigned int i = 0; i < _numVariables; i++)
-			_variables[i].set(GRB_DoubleAttr_LB, -GRB_INFINITY);
-
-	} else if (defaultVariableType == Integer) {
-
-		std::cout << "creating " << _numVariables << " integer variables" << std::endl;
-
-		_variables = _model.addVars(_numVariables, GRB_INTEGER);
-
-		_model.update();
-
-		// remove default lower bound on variables
-		for (unsigned int i = 0; i < _numVariables; i++)
-			_variables[i].set(GRB_DoubleAttr_LB, -GRB_INFINITY);
-	}
-
-	// handle special variable types
-	typedef std::map<unsigned int, VariableType>::const_iterator VarTypeIt;
-	for (VarTypeIt i = specialVariableTypes.begin(); i != specialVariableTypes.end(); i++) {
-
-		unsigned int v = i->first;
-		VariableType type = i->second;
-
-		char t = (type == Binary ? 'B' : (type == Integer ? 'I' : 'C'));
-		_variables[v].set(GRB_CharAttr_VType, t);
-	}
-
-	std::cout << "creating " << _numVariables << " coefficients" << std::endl;
-}
-
-inline void
-GurobiBackend::setObjective(const LinearObjective& objective) {
-
-	setObjective((QuadraticObjective)objective);
-}
-
-inline void
-GurobiBackend::setObjective(const QuadraticObjective& objective) {
-
-	try {
-
-		// set sense of objective
-		if (objective.getSense() == Minimize)
-			_model.set(GRB_IntAttr_ModelSense, 1);
-		else
-			_model.set(GRB_IntAttr_ModelSense, -1);
-
-		// set the constant value of the objective
-		_objective = objective.getConstant();
-
-		std::cout << "setting linear coefficients" << std::endl;
-
-		_objective.addTerms(&objective.getCoefficients()[0], _variables, _numVariables);
-
-		// set the quadratic coefficients for all pairs of variables
-		std::cout << "setting quadratic coefficients" << std::endl;
-
-		typedef std::map<std::pair<unsigned int, unsigned int>, double>::const_iterator QuadCoefIt;
-		for (QuadCoefIt i = objective.getQuadraticCoefficients().begin(); i != objective.getQuadraticCoefficients().end(); i++) {
-
-			const std::pair<unsigned int, unsigned int>& variables = i->first;
-			float value = i->second;
-
-			if (value != 0)
-				_objective += _variables[variables.first]*_variables[variables.second]*value;
-		}
-
-		_model.setObjective(_objective);
-
-		_model.update();
-
-	} catch (GRBException& e) {
-
-		std::cerr << "error: " << e.getMessage() << std::endl;
-	}
-}
-
-inline void
-GurobiBackend::setConstraints(const LinearConstraints& constraints) {
-
-	// remove previous constraints
-	for (std::vector<GRBConstr>::iterator constraint = _constraints.begin(); constraint != _constraints.end(); constraint++)
-		_model.remove(*constraint);
-	_constraints.clear();
-
-	_model.update();
-
-	// allocate memory for new constraints
-	_constraints.reserve(constraints.size());
-
-	try {
-
-		std::cout << "setting " << constraints.size() << " constraints" << std::endl;
-
-		for (LinearConstraints::const_iterator constraint = constraints.begin(); constraint != constraints.end(); constraint++) {
-            _constraints.push_back(createConstraint(*constraint));
-		}
-
-		_model.update();
-
-	} catch (GRBException& e) {
-
-		std::cerr << "error: " << e.getMessage() << std::endl;
-	}
-}
-
-inline void
-GurobiBackend::addConstraint(const LinearConstraint& constraint) {
-
-    try {
-
-        std::cout << "adding a constraint" << std::endl;
-
-        _constraints.push_back(createConstraint(constraint));
-        _model.update();
-
-    } catch (GRBException& e) {
-        std::cerr << "error: " << e.getMessage() << std::endl;
-    }
-}
-
-inline GRBConstr
-GurobiBackend::createConstraint(const LinearConstraint& constraint)
-{
-    // create the lhs expression
-    GRBLinExpr lhsExpr;
-
-    // set the coefficients
-    typedef std::map<unsigned int, double>::const_iterator CoefIt;
-    for (CoefIt pair = constraint.getCoefficients().begin(); pair != constraint.getCoefficients().end(); pair++)
-        lhsExpr += pair->second * _variables[pair->first];
-
-    // construct constraint
-    return _model.addConstr(
-                lhsExpr,
-                (constraint.getRelation() == LessEqual ? GRB_LESS_EQUAL :
-                                                          (constraint.getRelation() == GreaterEqual ? GRB_GREATER_EQUAL :
-                                                                                                       GRB_EQUAL)),
-                constraint.getValue());
-}
-
-inline bool
-GurobiBackend::solve(Solution& x, double& value, std::string& msg) {
-
-	try {
-
-		_model.optimize();
-
-		int status = _model.get(GRB_IntAttr_Status);
-
-		if (status != GRB_OPTIMAL) {
-			msg = "Optimal solution *NOT* found";
-			return false;
-		} else
-			msg = "Optimal solution found";
-
-		// extract solution
-
-		x.resize(_numVariables);
-		for (unsigned int i = 0; i < _numVariables; i++)
-			x[i] = _variables[i].get(GRB_DoubleAttr_X);
-
-		// get current value of the objective
-		value = _model.get(GRB_DoubleAttr_ObjVal);
-
-		x.setValue(value);
-
-	} catch (GRBException& e) {
-
-		std::cerr << "error: " << e.getMessage() << std::endl;
-
-		msg = e.getMessage();
-
-		return false;
-	}
-
-	return true;
-}
-
-inline void
-GurobiBackend::setMIPGap(double gap) {
-
-	_model.getEnv().set(GRB_DoubleParam_MIPGap, gap);
-}
-
-inline void
-GurobiBackend::setMIPFocus(unsigned int focus) {
-
-	_model.getEnv().set(GRB_IntParam_MIPFocus, focus);
-}
-
-inline void
-GurobiBackend::setNumThreads(unsigned int numThreads) {
-
-	_model.getEnv().set(GRB_IntParam_Threads, numThreads);
-}
-
-inline void
-GurobiBackend::setVerbose(bool verbose) {
-
-	// setup GRB environment
-	if (verbose)
-		_model.getEnv().set(GRB_IntParam_OutputFlag, 1);
-	else
-		_model.getEnv().set(GRB_IntParam_OutputFlag, 0);
-}
-
-inline void
-GurobiBackend::dumpProblem(std::string filename) {
-
-	try {
-
-		_model.write(filename);
-
-	} catch (GRBException& e) {
-
-		std::cerr << "error: " << e.getMessage() << std::endl;
-	}
-}
-
-}}} // namespace opengm::learning::solver
-
-#endif // WITH_GUROBI
-
-#endif // OPENGM_LEARNING_SOLVER_GUROBI_SOLVER_H__
-
-
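
Unlike the CPLEX backend above, where the per-variable override is still marked as a TODO, this backend honours the specialVariableTypes map, which allows, say, a mostly continuous problem with a few binary indicator variables. A short sketch (assumes the removed headers and a WITH_GUROBI build):

    #include <map>
    #include "GurobiBackend.h"  // the removed header shown above

    using namespace opengm::learning::solver;

    void mixedVariables() {
        GurobiBackend solver;

        // variables 0..9 continuous by default; variable 0 overridden to binary
        std::map<unsigned int, VariableType> special;
        special[0] = Binary;
        solver.initialize(10, Continuous, special);
    }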
diff --git a/include/opengm/learning/solver/LinearConstraint.h b/include/opengm/learning/solver/LinearConstraint.h
deleted file mode 100644
index bec224c..0000000
--- a/include/opengm/learning/solver/LinearConstraint.h
+++ /dev/null
@@ -1,94 +0,0 @@
-#ifndef INFERENCE_LINEAR_CONSTRAINT_H__
-#define INFERENCE_LINEAR_CONSTRAINT_H__
-
-#include <map>
-
-#include "Relation.h"
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-/**
- * A sparse linear constraint.
- */
-class LinearConstraint {
-
-public:
-
-	LinearConstraint();
-
-	void setCoefficient(unsigned int varNum, double coef);
-
-	void setRelation(Relation relation);
-
-	void setValue(double value);
-
-	const std::map<unsigned int, double>& getCoefficients() const;
-
-	const Relation& getRelation() const;
-
-	double getValue() const;
-
-private:
-
-	std::map<unsigned int, double> _coefs;
-
-	Relation _relation;
-
-	double _value;
-};
-
-inline
-LinearConstraint::LinearConstraint() :
-	_relation(LessEqual) {}
-
-inline void
-LinearConstraint::setCoefficient(unsigned int varNum, double coef) {
-
-	if (coef == 0) {
-
-		// erase by key; erasing a missing key is a no-op
-		_coefs.erase(varNum);
-
-	} else {
-
-		_coefs[varNum] = coef;
-	}
-}
-
-inline void
-LinearConstraint::setRelation(Relation relation) {
-
-	_relation = relation;
-}
-
-inline void
-LinearConstraint::setValue(double value) {
-
-	_value = value;
-}
-
-inline const std::map<unsigned int, double>&
-LinearConstraint::getCoefficients() const {
-
-	return _coefs;
-}
-
-inline const Relation&
-LinearConstraint::getRelation() const {
-
-	return _relation;
-}
-
-inline double
-LinearConstraint::getValue() const {
-
-	return _value;
-}
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_LINEAR_CONSTRAINT_H__
-
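
Constraints are assembled field by field; as the setCoefficient implementation above shows, writing a zero coefficient removes the entry from the sparse map again. For example, 2*x0 + 3*x1 <= 5 (a sketch against the removed header):

    #include "LinearConstraint.h"  // the removed header shown above

    void buildConstraint() {
        opengm::learning::solver::LinearConstraint c;  // relation defaults to LessEqual
        c.setCoefficient(0, 2.0);
        c.setCoefficient(1, 3.0);
        c.setValue(5.0);               // 2*x0 + 3*x1 <= 5
        c.setCoefficient(1, 0.0);      // removes x1 from the sparse map again
    }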
diff --git a/include/opengm/learning/solver/LinearConstraints.h b/include/opengm/learning/solver/LinearConstraints.h
deleted file mode 100644
index ef2d4f3..0000000
--- a/include/opengm/learning/solver/LinearConstraints.h
+++ /dev/null
@@ -1,119 +0,0 @@
-#ifndef INFERENCE_LINEAR_CONSTRAINTS_H__
-#define INFERENCE_LINEAR_CONSTRAINTS_H__
-
-#include "LinearConstraint.h"
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-class LinearConstraints {
-
-	typedef std::vector<LinearConstraint> linear_constraints_type;
-
-public:
-
-	typedef linear_constraints_type::iterator       iterator;
-
-	typedef linear_constraints_type::const_iterator const_iterator;
-
-	/**
-	 * Create a new set of linear constraints, initially holding 'size'
-	 * default-constructed constraints. More constraints can be added, but
-	 * unnecessary reallocations might occur if more are added than allocated.
-	 *
-	 * @param size The initial number of linear constraints.
-	 */
-	LinearConstraints(size_t size = 0);
-
-	/**
-	 * Remove all constraints from this set of linear constraints.
-	 */
-	void clear() { _linearConstraints.clear(); }
-
-	/**
-	 * Add a linear constraint.
-	 *
-	 * @param linearConstraint The linear constraint to add.
-	 */
-	void add(const LinearConstraint& linearConstraint);
-
-	/**
-	 * Add a set of linear constraints.
-	 *
-	 * @param linearConstraints The set of linear constraints to add.
-	 */
-	void addAll(const LinearConstraints& linearConstraints);
-
-	/**
-	 * @return The number of linear constraints in this set.
-	 */
-	unsigned int size() const { return _linearConstraints.size(); }
-
-	const_iterator begin() const { return _linearConstraints.begin(); }
-
-	iterator begin() { return _linearConstraints.begin(); }
-
-	const_iterator end() const { return _linearConstraints.end(); }
-
-	iterator end() { return _linearConstraints.end(); }
-
-	const LinearConstraint& operator[](size_t i) const { return _linearConstraints[i]; }
-
-	LinearConstraint& operator[](size_t i) { return _linearConstraints[i]; }
-
-	/**
-	 * Get a list of indices of linear constraints that use the given variables.
-	 */
-	std::vector<unsigned int> getConstraints(const std::vector<unsigned int>& variableIds);
-
-private:
-
-	linear_constraints_type _linearConstraints;
-};
-
-inline
-LinearConstraints::LinearConstraints(size_t size) {
-
-	_linearConstraints.resize(size);
-}
-
-inline void
-LinearConstraints::add(const LinearConstraint& linearConstraint) {
-
-	_linearConstraints.push_back(linearConstraint);
-}
-
-inline void
-LinearConstraints::addAll(const LinearConstraints& linearConstraints) {
-
-	_linearConstraints.insert(_linearConstraints.end(), linearConstraints.begin(), linearConstraints.end());
-}
-
-inline std::vector<unsigned int>
-LinearConstraints::getConstraints(const std::vector<unsigned int>& variableIds) {
-
-	std::vector<unsigned int> indices;
-
-	for (unsigned int i = 0; i < size(); i++) {
-
-		LinearConstraint& constraint = _linearConstraints[i];
-
-		for (std::vector<unsigned int>::const_iterator v = variableIds.begin(); v != variableIds.end(); v++) {
-
-			if (constraint.getCoefficients().count(*v) != 0) {
-
-				indices.push_back(i);
-				break;
-			}
-		}
-	}
-
-	return indices;
-}
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_LINEAR_CONSTRAINTS_H__
-
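
getConstraints above is a linear scan: a constraint's index is reported as soon as its coefficient map mentions any of the requested variables. A small usage sketch against this removed header:

    #include <vector>
    #include "LinearConstraints.h"  // the removed header shown above

    using namespace opengm::learning::solver;

    void lookupByVariable() {
        LinearConstraints constraints;

        LinearConstraint a;             // mentions variable 0
        a.setCoefficient(0, 1.0);
        constraints.add(a);

        LinearConstraint b;             // mentions variable 7
        b.setCoefficient(7, 1.0);
        constraints.add(b);

        std::vector<unsigned int> vars(1, 7);
        std::vector<unsigned int> hits = constraints.getConstraints(vars);
        // hits == {1}: only the second constraint uses variable 7
    }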
diff --git a/include/opengm/learning/solver/LinearObjective.h b/include/opengm/learning/solver/LinearObjective.h
deleted file mode 100644
index a8f1b9e..0000000
--- a/include/opengm/learning/solver/LinearObjective.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef INFERENCE_LINEAR_OBJECTIVE_H__
-#define INFERENCE_LINEAR_OBJECTIVE_H__
-
-#include "QuadraticObjective.h"
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-class LinearObjective : public QuadraticObjective {
-
-public:
-
-	LinearObjective(unsigned int size = 0) : QuadraticObjective(size) {}
-
-private:
-
-	using QuadraticObjective::setQuadraticCoefficient;
-};
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_LINEAR_OBJECTIVE_H__
-
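
The class adds nothing to QuadraticObjective except that the private using-declaration hides setQuadraticCoefficient, so a purely linear objective is enforced at compile time:

    #include "LinearObjective.h"  // the removed header shown above

    void linearOnly() {
        opengm::learning::solver::LinearObjective obj(2);
        obj.setCoefficient(0, 1.0);                 // fine: inherited linear interface
        // obj.setQuadraticCoefficient(0, 1, 2.0);  // would not compile: hidden above
    }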
diff --git a/include/opengm/learning/solver/LinearSolverBackend.h b/include/opengm/learning/solver/LinearSolverBackend.h
deleted file mode 100644
index 6ba5b2c..0000000
--- a/include/opengm/learning/solver/LinearSolverBackend.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef INFERENCE_LINEAR_SOLVER_BACKEND_H__
-#define INFERENCE_LINEAR_SOLVER_BACKEND_H__
-
-#include "LinearObjective.h"
-#include "LinearConstraints.h"
-#include "Solution.h"
-#include "VariableType.h"
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-class LinearSolverBackend {
-
-public:
-
-	virtual ~LinearSolverBackend() {};
-
-	/**
-	 * Initialise the linear solver for the given type of variables.
-	 *
-	 * @param numVariables The number of variables in the problem.
-	 * @param variableType The type of the variables (Continuous, Integer,
-	 *                     Binary).
-	 */
-	virtual void initialize(
-			unsigned int numVariables,
-			VariableType variableType) = 0;
-
-	/**
-	 * Initialise the linear solver for the given type of variables.
-	 *
-	 * @param numVariables
-	 *             The number of variables in the problem.
-	 * 
-	 * @param defaultVariableType
-	 *             The default type of the variables (Continuous, Integer, 
-	 *             Binary).
-	 *
-	 * @param specialVariableTypes
-	 *             A map of variable numbers to variable types to override the 
-	 *             default.
-	 */
-	virtual void initialize(
-			unsigned int                                numVariables,
-			VariableType                                defaultVariableType,
-			const std::map<unsigned int, VariableType>& specialVariableTypes) = 0;
-
-	/**
-	 * Set the objective.
-	 *
-	 * @param objective A linear objective.
-	 */
-	virtual void setObjective(const LinearObjective& objective) = 0;
-
-	/**
-	 * Set the linear (in)equality constraints.
-	 *
-	 * @param constraints A set of linear constraints.
-	 */
-	virtual void setConstraints(const LinearConstraints& constraints) = 0;
-
-	/**
-	 * Add a single linear constraint.
-	 *
-	 * @param constraint The constraint to add.
-	 */
-	virtual void addConstraint(const LinearConstraint& constraint) = 0;
-
-	/**
-	 * Solve the problem.
-	 *
-	 * @param solution A solution object to write the solution to.
-	 * @param value The optimal value of the objective.
-	 * @param message A status message from the solver.
-	 * @return true, if the optimal value was found.
-	 */
-	virtual bool solve(Solution& solution, double& value, std::string& message) = 0;
-};
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_LINEAR_SOLVER_BACKEND_H__
-
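
The point of this interface is that learners are written against it rather than against a concrete solver, so backends can be swapped at build time. A sketch of the documented call order, generic over any backend (assumes the removed headers above):

    #include <string>
    #include "LinearSolverBackend.h"  // the removed header shown above

    using namespace opengm::learning::solver;

    // Works with any concrete backend (Gurobi, CPLEX, ...) passed by the caller.
    bool solveWithAnyBackend(LinearSolverBackend& solver,
                             const LinearObjective& objective,
                             const LinearConstraints& constraints,
                             unsigned int numVariables,
                             Solution& solution) {
        solver.initialize(numVariables, Continuous);    // 1. declare the variables
        solver.setObjective(objective);                 // 2. set the objective
        solver.setConstraints(constraints);             // 3. set the constraints

        double value;
        std::string message;
        return solver.solve(solution, value, message);  // 4. solve
    }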
diff --git a/include/opengm/learning/solver/QuadraticObjective.h b/include/opengm/learning/solver/QuadraticObjective.h
deleted file mode 100644
index f0ffcc7..0000000
--- a/include/opengm/learning/solver/QuadraticObjective.h
+++ /dev/null
@@ -1,181 +0,0 @@
-#ifndef INFERENCE_QUADRATIC_OBJECTIVE_H__
-#define INFERENCE_QUADRATIC_OBJECTIVE_H__
-
-#include <map>
-
-#include "Sense.h"
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-class QuadraticObjective {
-
-public:
-
-	/**
-	 * Create a new quadratic objective for 'size' variables.
-	 *
-	 * @param size The number of coefficients in the objective.
-	 */
-	QuadraticObjective(unsigned int size = 0);
-
-	/**
-	 * Set the constant value of the expression.
-	 *
-	 * @param constant The value of the constant part of the objective.
-	 */
-	void setConstant(double constant);
-
-	/**
-	 * @return The value of the constant part of the objective.
-	 */
-	double getConstant() const;
-
-	/**
-	 * Add a coefficient.
-	 *
-	 * @param varNum The number of the variable to add the coefficient for.
-	 * @param coef The value of the coefficient.
-	 */
-	void setCoefficient(unsigned int varNum, double coef);
-
-	/**
-	 * Get the linear coefficients of this objective as a dense vector indexed
-	 * by variable number.
-	 *
-	 * @return A vector of coefficient values, one entry per variable.
-	 */
-	const std::vector<double>& getCoefficients() const;
-
-	/**
-	 * Add a quadratic coefficient. Use this to fill the Q matrix in the
-	 * objective <a,x> + xQx.
-	 *
-	 * @param varNum1 The row of Q.
-	 * @param varNum2 The column of Q.
-	 * @param coef The value of the coefficient.
-	 */
-	void setQuadraticCoefficient(unsigned int varNum1, unsigned int varNum2, double coef);
-
-	/**
-	 * Get the quadratic coefficients of this objective as a map of pairs of variable
-	 * numbers to coefficient values.
-	 *
-	 * @return A map from pairs of variable numbers to coefficient values.
-	 */
-	const std::map<std::pair<unsigned int, unsigned int>, double>& getQuadraticCoefficients() const;
-
-	/**
-	 * Set the sense of the objective.
-	 *
-	 * @param sense Minimize or Maximize.
-	 */
-	void setSense(Sense sense);
-
-	/**
-	 * Get the sense of this objective.
-	 *
-	 * @return Minimize or Maximize.
-	 */
-	Sense getSense() const;
-
-	/**
-	 * Resize the objective. New coefficients will be set to zero.
-	 *
-	 * @param size The new size of the objective.
-	 */
-	void resize(unsigned int size);
-
-	/**
-	 * Get the number of variables in this objective.
-	 *
-	 * @return The number of variables in this objective.
-	 */
-	unsigned int size() const { return _coefs.size(); }
-
-private:
-
-	Sense _sense;
-
-	double _constant;
-
-	// linear coefficients are assumed to be dense, therefore we use a vector
-	std::vector<double> _coefs;
-
-	std::map<std::pair<unsigned int, unsigned int>, double> _quadraticCoefs;
-};
-
-inline
-QuadraticObjective::QuadraticObjective(unsigned int size) :
-	_sense(Minimize),
-	_constant(0) {
-
-	resize(size);
-}
-
-inline void
-QuadraticObjective::setConstant(double constant) {
-
-	_constant = constant;
-}
-
-inline double
-QuadraticObjective::getConstant() const {
-
-	return _constant;
-}
-
-inline void
-QuadraticObjective::setCoefficient(unsigned int varNum, double coef) {
-
-	_coefs[varNum] = coef;
-}
-
-inline const std::vector<double>&
-QuadraticObjective::getCoefficients() const {
-
-	return _coefs;
-}
-
-inline void
-QuadraticObjective::setQuadraticCoefficient(unsigned int varNum1, unsigned int varNum2, double coef) {
-
-	if (coef == 0) {
-
-		// erase by key; erasing a missing key is a no-op (erasing an end()
-		// iterator, as before, would be undefined behaviour)
-		_quadraticCoefs.erase(std::make_pair(varNum1, varNum2));
-
-	} else {
-
-		_quadraticCoefs[std::make_pair(varNum1, varNum2)] = coef;
-	}
-}
-
-inline const std::map<std::pair<unsigned int, unsigned int>, double>&
-QuadraticObjective::getQuadraticCoefficients() const {
-
-	return _quadraticCoefs;
-}
-
-inline void
-QuadraticObjective::setSense(Sense sense) {
-
-	_sense = sense;
-}
-
-inline Sense
-QuadraticObjective::getSense() const {
-
-	return _sense;
-}
-
-inline void
-QuadraticObjective::resize(unsigned int size) {
-
-	_coefs.resize(size, 0.0);
-}
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_QUADRATIC_OBJECTIVE_H__
-
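
Filling the objective <a,x> + xQx, for example min 2 + x0 - x1 + x0^2 + 2*x0*x1: the linear part goes into the dense coefficient vector, the quadratic part into the sparse map keyed by (row, column) of Q. A sketch against the removed header:

    #include "QuadraticObjective.h"  // the removed header shown above

    void buildObjective() {
        using namespace opengm::learning::solver;

        QuadraticObjective obj(2);
        obj.setSense(Minimize);                    // the default, shown for clarity
        obj.setConstant(2.0);                      // + 2
        obj.setCoefficient(0,  1.0);               // + x0
        obj.setCoefficient(1, -1.0);               // - x1
        obj.setQuadraticCoefficient(0, 0, 1.0);    // + x0*x0    (Q[0][0])
        obj.setQuadraticCoefficient(0, 1, 2.0);    // + 2*x0*x1  (Q[0][1])
    }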
diff --git a/include/opengm/learning/solver/QuadraticSolverBackend.h b/include/opengm/learning/solver/QuadraticSolverBackend.h
deleted file mode 100644
index cc3a160..0000000
--- a/include/opengm/learning/solver/QuadraticSolverBackend.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef INFERENCE_QUADRATIC_SOLVER_BACKEND_H__
-#define INFERENCE_QUADRATIC_SOLVER_BACKEND_H__
-
-#include "QuadraticObjective.h"
-#include "LinearSolverBackend.h"
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-class QuadraticSolverBackend : public LinearSolverBackend {
-
-public:
-
-	virtual ~QuadraticSolverBackend() {};
-
-	/**
-	 * Set the objective.
-	 *
-	 * @param objective A quadratic objective.
-	 */
-	virtual void setObjective(const QuadraticObjective& objective) = 0;
-};
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_QUADRATIC_SOLVER_BACKEND_H__
-
diff --git a/include/opengm/learning/solver/QuadraticSolverFactory.h b/include/opengm/learning/solver/QuadraticSolverFactory.h
deleted file mode 100644
index e986630..0000000
--- a/include/opengm/learning/solver/QuadraticSolverFactory.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef OPENGM_LEARNING_SOLVER_QUADRATIC_SOLVER_FACTORY_H__
-#define OPENGM_LEARNING_SOLVER_QUADRATIC_SOLVER_FACTORY_H__
-
-#ifdef WITH_GUROBI
-#include "GurobiBackend.h"
-#elif defined(WITH_CPLEX)
-#include "CplexBackend.h"
-#endif
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-class QuadraticSolverFactory {
-
-public:
-
-	static QuadraticSolverBackend* Create() {
-
-#ifdef WITH_GUROBI
-		return new GurobiBackend();
-#elif defined(WITH_CPLEX)
-		return new CplexBackend();
-#else
-		throw opengm::RuntimeError("No quadratic solver available.");
-#endif
-	}
-};
-
-}}} // namespace opengm::learning::solver
-
-#endif // OPENGM_LEARNING_SOLVER_QUADRATIC_SOLVER_FACTORY_H__
-
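
Create() hands back a raw owning pointer (the code base predates C++11), so the caller deletes it and must be prepared for the RuntimeError thrown when neither WITH_GUROBI nor WITH_CPLEX was set at build time:

    #include "QuadraticSolverFactory.h"  // the removed header shown above

    using namespace opengm::learning::solver;

    void useFactory() {
        // GurobiBackend or CplexBackend, depending on the build flags;
        // throws opengm::RuntimeError if no solver was compiled in
        QuadraticSolverBackend* solver = QuadraticSolverFactory::Create();

        // ... initialize / setObjective / setConstraints / solve ...

        delete solver;  // the caller owns the backend
    }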
diff --git a/include/opengm/learning/solver/QuadraticSolverParameters.h b/include/opengm/learning/solver/QuadraticSolverParameters.h
deleted file mode 100644
index 42486e8..0000000
--- a/include/opengm/learning/solver/QuadraticSolverParameters.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef INFERENCE_QUADRATIC_SOLVER_PARAMETERS_H__
-#define INFERENCE_QUADRATIC_SOLVER_PARAMETERS_H__
-
-#include "LinearSolverParameters.h"
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-class QuadraticSolverParameters : public LinearSolverParameters {};
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_QUADRATIC_SOLVER_PARAMETERS_H__
-
diff --git a/include/opengm/learning/solver/Relation.h b/include/opengm/learning/solver/Relation.h
deleted file mode 100644
index 7364591..0000000
--- a/include/opengm/learning/solver/Relation.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef INFERENCE_RELATION_H__
-#define INFERENCE_RELATION_H__
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-/** Used to indicate the relation of a linear constraint.
- */
-enum Relation {
-
-	LessEqual,
-	Equal,
-	GreaterEqual
-};
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_RELATION_H__
-
diff --git a/include/opengm/learning/solver/Sense.h b/include/opengm/learning/solver/Sense.h
deleted file mode 100644
index 3f50c3a..0000000
--- a/include/opengm/learning/solver/Sense.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef SENSE_H__
-#define SENSE_H__
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-/** Used to indicate whether an objective is supposed to be minimized or
- * maximized.
- */
-enum Sense {
-
-	Minimize,
-	Maximize
-};
-
-}}} // namespace opengm::learning::solver
-
-#endif // SENSE_H__
-
diff --git a/include/opengm/learning/solver/Solution.h b/include/opengm/learning/solver/Solution.h
deleted file mode 100644
index 8016bda..0000000
--- a/include/opengm/learning/solver/Solution.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef INFERENCE_SOLUTION_H__
-#define INFERENCE_SOLUTION_H__
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-class Solution {
-
-public:
-
-	Solution(unsigned int size = 0);
-
-	void resize(unsigned int size);
-
-	unsigned int size() const { return _solution.size(); }
-
-	const double& operator[](unsigned int i) const { return _solution[i]; }
-
-	double& operator[](unsigned int i) { return _solution[i]; }
-
-	std::vector<double>& getVector() { return _solution; }
-
-	void setValue(double value) { _value = value; }
-
-	double getValue() { return _value; }
-
-private:
-
-	std::vector<double> _solution;
-
-	double _value;
-};
-
-inline Solution::Solution(unsigned int size) {
-
-	resize(size);
-}
-
-inline void
-Solution::resize(unsigned int size) {
-
-	_solution.resize(size);
-}
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_SOLUTION_H__
-
diff --git a/include/opengm/learning/solver/VariableType.h b/include/opengm/learning/solver/VariableType.h
deleted file mode 100644
index d107a41..0000000
--- a/include/opengm/learning/solver/VariableType.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef INFERENCE_VARIABLE_TYPE_H__
-#define INFERENCE_VARIABLE_TYPE_H__
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-enum VariableType {
-
-	Continuous,
-	Integer,
-	Binary
-};
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_VARIABLE_TYPE_H__
-
diff --git a/include/opengm/learning/struct-max-margin.hxx b/include/opengm/learning/struct-max-margin.hxx
deleted file mode 100644
index 8af78ed..0000000
--- a/include/opengm/learning/struct-max-margin.hxx
+++ /dev/null
@@ -1,219 +0,0 @@
-#pragma once
-#ifndef OPENGM_LEARNING_STRUCT_MAX_MARGIN_HXX
-#define OPENGM_LEARNING_STRUCT_MAX_MARGIN_HXX
-
-#include "bundle-optimizer.hxx"
-#include "gradient-accumulator.hxx"
-
-#ifdef WITH_OPENMP
-#include <omp.h>
-#endif
-
-namespace opengm {
-
-namespace learning {
-
-template <
-		typename DS,
-		typename O = BundleOptimizer<typename DS::ValueType> >
-class StructMaxMargin {
-
-public:
-
-	typedef DS DatasetType;
-	typedef O  OptimizerType;
-
-    typedef typename DatasetType::GMType GMType;
-    typedef typename DatasetType::GMWITHLOSS GMWITHLOSS;
-	typedef typename DatasetType::ValueType       ValueType;
-    typedef typename DatasetType::Weights         Weights;
-
-	struct Parameter {
-        typedef typename OptimizerType::Parameter OptimizerParameter;
-        OptimizerParameter optimizerParameter_;
-	};
-
-	StructMaxMargin(DatasetType& dataset, const Parameter& parameter = Parameter()) :
-		_dataset(dataset),
-        _parameter(parameter),
-        _optimizer(parameter.optimizerParameter_)
-    {}
-
-	Parameter& parameter() { return _parameter; }
-
-    template <typename InferenceType>
-    void learn(const typename InferenceType::Parameter& parameter);
-
-    const Weights& getWeights() { return _weights; }
-
-private:
-
-	template <typename InferenceType>
-	class Oracle {
-
-		public:
-
-            Oracle(DatasetType& dataset, const typename InferenceType::Parameter& infParam) :
-                _dataset(dataset),
-                _infParam(infParam)
-            {}
-
-			/**
-			 * Evaluate the loss-augmented energy value of the dataset and its 
-			 * gradient at w.
-			 */
-            void operator()(const Weights& w, double& value, Weights& gradient) {
-
-				typedef std::vector<typename InferenceType::LabelType> ConfigurationType;
-
-				// initialize gradient and value with zero
-				for (int i = 0; i < gradient.numberOfWeights(); i++)
-					gradient[i] = 0;
-				value = 0;
-
-				// For each model E(y,w), we have to compute the value and 
-				// gradient of
-				//
-				//   max_y E(y',w) - E(y,w) + Δ(y',y)            (1)
-				//   =
-				//   max_y L(y,w)
-				//
-				// where y' is the best-effort solution (also known as 
-				// groundtruth) and w are the current weights. The loss 
-				// augmented model given by the dataset is
-				//
-				//   F(y,w) = E(y,w) - Δ(y',y).
-				//
-				// Let c = E(y',w) be the constant contribution of the 
-				// best-effort solution. (1) is equal to
-				//
-				//  -min_y -c + F(y,w).
-				//
-				// The gradient of the maximand in (1) at y* is
-				//
-				//   ∂L(y,w)/∂w = ∂E(y',w)/∂w -
-				//                ∂E(y,w)/∂w
-				//
-				//              = Σ_θ ∂θ(y'_θ,w)/∂w -
-				//                Σ_θ ∂θ(y_θ,w)/∂w,
-				//
-				// which is a positive gradient contribution for the 
-				// best-effort, and a negative contribution for the maximizer 
-				// y*.
-
-				// set the weights w in E(x,y) and F(x,y)
-				_dataset.getWeights() = w;
-
-                //if(_infParam.verbose_ )
-                //    std::cout << std::endl << " MODEL : ";
-
-                #ifdef WITH_OPENMP
-                omp_lock_t modelLock;
-                omp_init_lock(&modelLock);
-                #pragma omp parallel for
-                #endif
-                for (int i = 0; i < _dataset.getNumberOfModels(); i++) {
-                    // if(_infParam.verbose_ )
-                    //     std::cout << i;
-
-                    // lock the model
-                    #ifdef WITH_OPENMP
-                    omp_set_lock(&modelLock);
-                    _dataset.lockModel(i);
-                    omp_unset_lock(&modelLock);
-                    #else
-                    _dataset.lockModel(i);
-                    #endif
-                    // get E(x,y) and F(x,y)
-                    const GMType &     gm  = _dataset.getModel(i);
-					const GMWITHLOSS & gml = _dataset.getModelWithLoss(i);
-
-					// get the best-effort solution y'
-					const ConfigurationType& bestEffort = _dataset.getGT(i);
-
-					// compute constant c for current w
-					ValueType c = gm.evaluate(bestEffort);
-
-					// find the minimizer y* of F(y,w)
-					ConfigurationType mostViolated;
-                    InferenceType inference(gml, _infParam);
-
-                    inference.infer();
-                    inference.arg(mostViolated);
-
-					// the optimal value of (1) is now c - F(y*,w)
-                    #pragma omp atomic
-                    value += c - gml.evaluate(mostViolated);
-
-					// the gradients are
-					typedef GradientAccumulator<Weights, ConfigurationType> GA;
-                    GA gaBestEffort(gradient, bestEffort, GA::Add);
-                    GA gaMostViolated(gradient, mostViolated, GA::Subtract);
-                    for (size_t j = 0; j < gm.numberOfFactors(); j++) {
-
-						gm[j].callViFunctor(gaBestEffort);
-						gm[j].callViFunctor(gaMostViolated);
-					}
-
-                    // unlock the model
-                    #ifdef WITH_OPENMP
-                    omp_set_lock(&modelLock);
-                    _dataset.unlockModel(i);
-                    omp_unset_lock(&modelLock);
-                    #else
-                    _dataset.unlockModel(i);
-                    #endif
-                } // end for model
-			}
-
-            const typename InferenceType::Parameter& getInfParam(){
-                return _infParam;
-            }
-
-		private:
-
-			DatasetType& _dataset;
-            const typename InferenceType::Parameter& _infParam;
-	};
-
-	DatasetType& _dataset;
-
-	Parameter _parameter;
-
-	OptimizerType _optimizer;
-
-    Weights _weights;
-};
-
-template <typename DS, typename O>
-template <typename InferenceType>
-void
-StructMaxMargin<DS, O>::learn(const typename InferenceType::Parameter& infParams) {
-
-    typedef typename InferenceType:: template RebindGm<GMWITHLOSS>::type InfType;
-
-    typedef typename InfType::Parameter InfTypeParam;
-    InfTypeParam infTypeParam(infParams);
-    Oracle<InfType> oracle(_dataset, infTypeParam);
-
-	_weights = _dataset.getWeights();
-
-	// minimize structured loss
-    OptimizerResult result = _optimizer.optimize(oracle, _weights);
-
-	if (result == Error)
-		throw opengm::RuntimeError("optimizer did not succeed");
-
-	if (result == ReachedMinGap)
-		std::cout << "optimization converged to requested precision" << std::endl;
-
-	if (result == ReachedSteps)
-        std::cout << "optimization stopped after " << parameter().optimizerParameter_.steps << " iterations" << std::endl;
-}
-
-} // namespace learning
-
-} // namespace opengm
-
-#endif // OPENGM_LEARNING_STRUCT_MAX_MARGIN_HXX
-
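
Putting the learner above to work is short once a dataset exists. The sketch below is kept generic because the concrete dataset and inference types came from headers removed in this same commit; DatasetType and InfType are placeholders, and the 'steps' field is the bundle iteration budget referenced in the final log message of learn():

    #include "struct-max-margin.hxx"  // the removed header shown above

    // DatasetType stands in for one of the removed dataset classes and
    // InfType for a compatible inference algorithm; both are placeholders.
    template <class DatasetType, class InfType>
    void trainMaxMargin(DatasetType& dataset) {
        typedef opengm::learning::StructMaxMargin<DatasetType> Learner;

        typename Learner::Parameter param;
        param.optimizerParameter_.steps = 100;       // bundle iteration budget

        Learner learner(dataset, param);

        typename InfType::Parameter infParam;
        learner.template learn<InfType>(infParam);   // oracle + bundle optimization

        const typename DatasetType::Weights& learned = learner.getWeights();
        (void)learned;  // hand the weights on to whatever uses the model
    }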
diff --git a/include/opengm/learning/structured_perceptron.hxx b/include/opengm/learning/structured_perceptron.hxx
deleted file mode 100644
index f66cd07..0000000
--- a/include/opengm/learning/structured_perceptron.hxx
+++ /dev/null
@@ -1,208 +0,0 @@
-#pragma once
-#ifndef OPENGM_STRUCT_PERCEPTRON_LEARNER_HXX
-#define OPENGM_STRUCT_PERCEPTRON_LEARNER_HXX
-
-#include <vector>
-#include <opengm/inference/inference.hxx>
-#include <opengm/graphicalmodel/weights.hxx>
-#include <opengm/utilities/random.hxx>
-#include <opengm/learning/gradient-accumulator.hxx>
-
-
-namespace opengm {
-    namespace learning {
-
-
-
-           
-    template<class DATASET>
-    class StructuredPerceptron
-    {
-    public: 
-        typedef DATASET DatasetType;
-        typedef typename DATASET::GMType   GMType; 
-        typedef typename DATASET::LossType LossType;
-        typedef typename GMType::ValueType ValueType;
-        typedef typename GMType::IndexType IndexType;
-        typedef typename GMType::LabelType LabelType; 
-
-        typedef typename std::vector<LabelType>::const_iterator LabelIterator;
-        typedef FeatureAccumulator<GMType, LabelIterator> FeatureAcc;
-
-        class Parameter{
-        public:
-
-            enum LearningMode{
-                Online = 0,
-                Batch = 2
-            };
-
-
-            Parameter(){
-                eps_ = 0.00001;
-                maxIterations_ = 10000;
-                stopLoss_ = 0.0;
-                decayExponent_ = 0.0;
-                decayT0_ = 0.0;
-                learningMode_ = Online;
-            }       
-
-            double eps_;
-            size_t maxIterations_;
-            double stopLoss_;
-            double decayExponent_;
-            double decayT0_;
-            LearningMode learningMode_;
-        };
-
-
-        StructuredPerceptron(DATASET&, const Parameter& );
-
-        template<class INF>
-        void learn(const typename INF::Parameter& para); 
-        //template<class INF, class VISITOR>
-        //void learn(typename INF::Parameter para, VISITOR vis);
-
-        const opengm::learning::Weights<double>& getWeights(){return weights_;}
-        Parameter& getLearningParameters(){return para_;}
-
-
-        double getLearningRate( )const{
-            // a (numerically) zero decay exponent means a constant rate of 1
-            if(para_.decayExponent_<=0.000000001 && para_.decayExponent_>=-0.000000001 ){
-                return 1.0;
-            }
-            else{
-                // rate_t = (decayT0 + t + 1)^decayExponent; the exponent is
-                // typically negative, e.g. -1 for a 1/t schedule
-                return std::pow(para_.decayT0_ + static_cast<double>(iteration_+1), para_.decayExponent_);
-            }
-        }
-
-    private:
-
-        double updateWeights();
-
-        DATASET& dataset_;
-        opengm::learning::Weights<double> weights_;
-        Parameter para_;
-        size_t iteration_;
-        FeatureAcc featureAcc_;
-    }; 
-
-    template<class DATASET>
-    StructuredPerceptron<DATASET>::StructuredPerceptron(DATASET& ds, const Parameter& p )
-    : dataset_(ds), para_(p),iteration_(0),featureAcc_(ds.getNumberOfWeights(),false)
-    {
-        featureAcc_.resetWeights();
-        weights_ = opengm::learning::Weights<double>(ds.getNumberOfWeights());
-  
-    }
-
-
-    template<class DATASET>
-    template<class INF>
-    void StructuredPerceptron<DATASET>::learn(const typename INF::Parameter& para){
-
-
-        const size_t nModels = dataset_.getNumberOfModels();
-        const size_t nWeights = dataset_.getNumberOfWeights();
-
-
-        if(para_.learningMode_ == Parameter::Online){
-            RandomUniform<size_t> randModel(0, nModels);
-            std::cout<<"online mode\n";
-            for(iteration_=0 ; iteration_<para_.maxIterations_; ++iteration_){
-
-                if(iteration_%nModels==0){
-                    std::cout<<"loss :"<<dataset_. template getTotalLoss<INF>(para)<<"\n";
-                }
-
-
-                // get random model
-                const size_t gmi = randModel();
-                // lock the model
-                dataset_.lockModel(gmi);
-                const GMType & gm = dataset_.getModel(gmi);
-
-                // do inference
-                std::vector<LabelType> arg;
-                opengm::infer<INF>(gm, para, arg);
-                featureAcc_.resetWeights();
-                featureAcc_.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), arg.begin());
-                dataset_.unlockModel(gmi);
-
-                // update weights
-                const double wChange =updateWeights();
-
-            }
-        }
-        else if(para_.learningMode_ == Parameter::Batch){
-            std::cout<<"batch mode\n";
-            for(iteration_=0 ; iteration_<para_.maxIterations_; ++iteration_){
-                // print the current total loss once per iteration
-                std::cout<<"loss :"<<dataset_. template getTotalLoss<INF>(para)<<"\n";
-
-                // reset the weights
-                featureAcc_.resetWeights();
-
-                //#pragma omp parallel for
-                for(size_t gmi=0; gmi<nModels; ++gmi)
-                {
-                    
-                    // lock the model
-                    //omp_set_lock(&modelLockUnlock);
-                    dataset_.lockModel(gmi);     
-                    //omp_unset_lock(&modelLockUnlock);
-                        
-                    
-
-                    const GMType & gm = dataset_.getModel(gmi);
-                    //run inference
-                    std::vector<LabelType> arg;
-                    opengm::infer<INF>(gm, para, arg);
-
-                    // per-model accumulator, merged into featureAcc_ below
-                    FeatureAcc featureAcc(nWeights,false);
-                    featureAcc.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), arg.begin());
-
-
-                    featureAcc_.accumulateFromOther(featureAcc);
-                    dataset_.unlockModel(gmi);    
-
-
-                }
-
-                // update the weights
-                const double wChange =updateWeights();
-
-            }
-        }
-
-        weights_ = dataset_.getWeights();
-    }
-
-
-    template<class DATASET>
-    double StructuredPerceptron<DATASET>::updateWeights(){
-        double wChange = 0.0;
-        const size_t nWeights = dataset_.getNumberOfWeights();
-        for(size_t wi=0; wi<nWeights; ++wi){
-            const double wOld = dataset_.getWeights().getWeight(wi);
-            const double wNew = wOld + getLearningRate()*featureAcc_.getWeight(wi);
-            wChange += std::pow(wOld-wNew,2);
-            dataset_.getWeights().setWeight(wi, wNew);
-        }
-        weights_ = dataset_.getWeights();
-        return wChange;
-    }
-}
-}
-#endif
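Note on the schedule above: getLearningRate() implements the polynomial decay rate(t) = (decayT0_ + t + 1)^decayExponent_, with the near-zero window on decayExponent_ short-circuiting to a constant rate of 1.0. With decayT0_ = 0 and decayExponent_ = -1 this is the classic perceptron schedule 1/(t+1), i.e. 1.0, 0.5, 0.333, 0.25 over iterations 0..3; updateWeights() then moves each weight by rate(t) times the feature statistic accumulated in featureAcc_.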
diff --git a/include/opengm/learning/subgradient_ssvm.hxx b/include/opengm/learning/subgradient_ssvm.hxx
deleted file mode 100644
index 67514f9..0000000
--- a/include/opengm/learning/subgradient_ssvm.hxx
+++ /dev/null
@@ -1,353 +0,0 @@
-#pragma once
-#ifndef OPENGM_SUBGRADIENT_SSVM_LEARNER_HXX
-#define OPENGM_SUBGRADIENT_SSVM_LEARNER_HXX
-
-#include <iomanip>
-#include <vector>
-#include <opengm/inference/inference.hxx>
-#include <opengm/graphicalmodel/weights.hxx>
-#include <opengm/utilities/random.hxx>
-#include <opengm/learning/gradient-accumulator.hxx>
-#include <opengm/learning/weight_averaging.hxx>
-
-#ifdef WITH_OPENMP
-#include <omp.h>
-#endif
-
-#include <boost/circular_buffer.hpp>
-
-
-
-namespace opengm {
-    namespace learning {
-
-    template<class DATASET>
-    class SubgradientSSVM
-    {
-    public: 
-        typedef DATASET DatasetType;
-        typedef typename DATASET::GMType   GMType; 
-        typedef typename DATASET::GMWITHLOSS GMWITHLOSS;
-        typedef typename DATASET::LossType LossType;
-        typedef typename GMType::ValueType ValueType;
-        typedef typename GMType::IndexType IndexType;
-        typedef typename GMType::LabelType LabelType; 
-        typedef opengm::learning::Weights<double> WeightsType;
-        typedef typename std::vector<LabelType>::const_iterator LabelIterator;
-        typedef FeatureAccumulator<GMType, LabelIterator> FeatureAcc;
-
-        typedef std::vector<LabelType> ConfType;
-        typedef boost::circular_buffer<ConfType> ConfBuffer;
-        typedef std::vector<ConfBuffer> ConfBufferVec;
-
-        class Parameter{
-        public:
-
-            enum LearningMode{
-                Online = 0,
-                Batch = 1
-            };
-
-
-            Parameter(){
-                eps_ = 0.00001;
-                maxIterations_ = 10000;
-                stopLoss_ = 0.0;
-                learningRate_ = 1.0;
-                C_ = 1.0;
-                learningMode_ = Batch;
-                averaging_ = -1;
-                nConf_ = 0;
-            }       
-
-            double eps_;
-            size_t maxIterations_;
-            double stopLoss_;
-            double learningRate_;
-            double C_;
-            LearningMode learningMode_;
-            int averaging_;
-            int nConf_;
-        };
-
-
-        SubgradientSSVM(DATASET&, const Parameter& );
-
-        template<class INF>
-        void learn(const typename INF::Parameter& para); 
-        //template<class INF, class VISITOR>
-        //void learn(typename INF::Parameter para, VISITOR vis);
-
-        const opengm::learning::Weights<double>& getWeights(){return weights_;}
-        Parameter& getLearningParameters(){return para_;}
-
-
-        double getLearningRate()const{
-            // this learner's Parameter has no decay members (unlike the structured
-            // perceptron), so report the 1/(t+1) decayed rate that updateWeights() applies
-            return para_.learningRate_/static_cast<double>(iteration_+1);
-        }
-
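-        // sums only the appended loss factors: in the loss-augmented model they sit
-        // behind the model's own factors, i.e. in the factor index range
-        // [gm.numberOfFactors(), gmWithLoss.numberOfFactors())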
-        double getLoss(const GMType & gm, const GMWITHLOSS & gmWithLoss, std::vector<LabelType> & labels){
-
-            double loss = 0;
-            std::vector<LabelType> subConf;
-
-            for(size_t fi=gm.numberOfFactors(); fi<gmWithLoss.numberOfFactors(); ++fi){
-                subConf.resize(gmWithLoss[fi].numberOfVariables());
-                for(size_t v=0; v<gmWithLoss[fi].numberOfVariables(); ++v){
-                    subConf[v] = labels[ gmWithLoss[fi].variableIndex(v)];
-                }
-                loss += gmWithLoss[fi](subConf.begin());
-            }
-            return loss;
-        }
-
-    private:
-
-        double updateWeights();
-
-        DATASET& dataset_;
-        WeightsType  weights_;
-        Parameter para_;
-        size_t iteration_;
-        FeatureAcc featureAcc_;
-        WeightRegularizer<ValueType> wReg_;
-        WeightAveraging<double> weightAveraging_;
-    }; 
-
-    template<class DATASET>
-    SubgradientSSVM<DATASET>::SubgradientSSVM(DATASET& ds, const Parameter& p )
-    :   dataset_(ds), 
-        para_(p),
-        iteration_(0),
-        featureAcc_(ds.getNumberOfWeights()),
-        wReg_(2, 1.0/p.C_),
-        weightAveraging_(ds.getWeights(),p.averaging_)
-    {
-        featureAcc_.resetWeights();
-        weights_ = opengm::learning::Weights<double>(ds.getNumberOfWeights());
-    }
-
-
-    template<class DATASET>
-    template<class INF>
-    void SubgradientSSVM<DATASET>::learn(const typename INF::Parameter& para){
-
-
-        typedef typename INF:: template RebindGm<GMWITHLOSS>::type InfLossGm;
-        typedef typename InfLossGm::Parameter InfLossGmParam;
-        InfLossGmParam infLossGmParam(para);
-
-
-        const size_t nModels = dataset_.getNumberOfModels();
-        const size_t nWeights = dataset_.getNumberOfWeights();
-
-        
-        for(size_t wi=0; wi<nWeights; ++wi){
-            dataset_.getWeights().setWeight(wi, 0.0);
-        }
-        std::cout<<"PARAM nConf_"<<para_.nConf_<<"\n";
-        const bool useWorkingSets = para_.nConf_>0;
-
-        ConfBufferVec buffer(useWorkingSets? nModels : 0, ConfBuffer(para_.nConf_));
-
-        if(para_.learningMode_ == Parameter::Online){
-            RandomUniform<size_t> randModel(0, nModels);
-            //std::cout<<"online mode\n";
-            for(iteration_=0 ; iteration_<para_.maxIterations_; ++iteration_){
-
-                // get random model
-                const size_t gmi = randModel();
-                // lock the model
-                dataset_.lockModel(gmi);
-                const GMWITHLOSS & gmWithLoss = dataset_.getModelWithLoss(gmi);
-
-                // do inference
-                std::vector<LabelType> arg;
-                opengm::infer<InfLossGm>(gmWithLoss, infLossGmParam, arg);
-                featureAcc_.resetWeights();
-                featureAcc_.accumulateModelFeatures(dataset_.getModel(gmi), dataset_.getGT(gmi).begin(), arg.begin());
-                dataset_.unlockModel(gmi);
-
-                // update weights
-                const double wChange =updateWeights();
-
-                if(iteration_%(nModels*2) == 0 ){
-                    std::cout << '\r'
-                              << std::setw(6) << std::setfill(' ') << iteration_ << ':'
-                              << std::setw(8) << dataset_. template getTotalLossParallel<INF>(para) <<"  "<< std::flush;
-
-                }
-
-            }
-        }
-        else if(para_.learningMode_ == Parameter::Batch){
-            //std::cout<<"batch mode\n";
-            for(iteration_=0 ; iteration_<para_.maxIterations_; ++iteration_){
-
-                // reset the weights
-                featureAcc_.resetWeights();
-                double totalLoss = 0;
-
-                #ifdef WITH_OPENMP
-                omp_lock_t modelLockUnlock;
-                omp_init_lock(&modelLockUnlock);
-                omp_lock_t featureAccLock;
-                omp_init_lock(&featureAccLock);
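-                // modelLockUnlock serializes dataset (un)locking, featureAccLock guards
-                // the shared accumulator featureAcc_; totalLoss needs neither, it is
-                // combined via the OpenMP reduction clause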
-                #pragma omp parallel for reduction(+:totalLoss)  
-                #endif
-                for(size_t gmi=0; gmi<nModels; ++gmi){
-                    
-                    // lock the model
-                    #ifdef WITH_OPENMP
-                    omp_set_lock(&modelLockUnlock);
-                    dataset_.lockModel(gmi);     
-                    omp_unset_lock(&modelLockUnlock);
-                    #else
-                    dataset_.lockModel(gmi);     
-                    #endif
-                        
-                    
-
-                    const GMWITHLOSS & gmWithLoss = dataset_.getModelWithLoss(gmi);
-                    const GMType     & gm = dataset_.getModel(gmi);
-                    //run inference
-                    std::vector<LabelType> arg;
-                    opengm::infer<InfLossGm>(gmWithLoss, infLossGmParam, arg);
-
-                    totalLoss = totalLoss + getLoss(gm, gmWithLoss, arg);
-
-             
-                    if(useWorkingSets){
-                        // append current solution
-                        buffer[gmi].push_back(arg);
-
-                        size_t vCount=0;
-                        // check which cached configurations are violated; the flags are
-                        // local to this model (and thread), so no stale entries or races
-                        std::vector<bool> isViolated(buffer[gmi].size(), false);
-                        for(size_t cc=0; cc<buffer[gmi].size(); ++cc){
-                            const double mLoss = dataset_.getLoss(buffer[gmi][cc], gmi);
-                            const double argVal = gm.evaluate(buffer[gmi][cc]);
-                            const double gtVal =  gm.evaluate(dataset_.getGT(gmi));
-                            const double ll = (argVal - mLoss) - gtVal;
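-                            // a cached configuration y is violated iff its loss-augmented
-                            // energy undercuts the ground truth: E(y) - loss(y) < E(y_gt)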
-                            //std::cout<<" argVal "<<argVal<<" gtVal "<<gtVal<<" mLoss "<<mLoss<<"   VV "<<ll<<"\n";
-                            if(ll<0){
-                                isViolated[cc] = true;
-                                ++vCount;
-                            }
-                        }
-                        FeatureAcc featureAcc(nWeights);
-                        for(size_t cc=0; cc<buffer[gmi].size(); ++cc){
-                            if(isViolated[cc]){
-
-                                featureAcc.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), buffer[gmi][cc].begin(),1.0/double(vCount));
-
-                            }
-                        }
-                        #ifdef WITH_OPENMP
-                        omp_set_lock(&featureAccLock);
-                        featureAcc_.accumulateFromOther(featureAcc);
-                        omp_unset_lock(&featureAccLock);
-                        #else
-                        featureAcc_.accumulateFromOther(featureAcc);
-                        #endif
-                    }
-                    else{
-                        FeatureAcc featureAcc(nWeights);
-                        featureAcc.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), arg.begin());
-                        #ifdef WITH_OPENMP
-                        omp_set_lock(&featureAccLock);
-                        featureAcc_.accumulateFromOther(featureAcc);
-                        omp_unset_lock(&featureAccLock);
-                        #else
-                        featureAcc_.accumulateFromOther(featureAcc);
-                        #endif
-                    }
-
-
-                    // unlock the model
-                    #ifdef WITH_OPENMP
-                    omp_set_lock(&modelLockUnlock);
-                    dataset_.unlockModel(gmi);     
-                    omp_unset_lock(&modelLockUnlock);
-                    #else
-                    dataset_.unlockModel(gmi);     
-                    #endif
-
-
-                }
-
-                //const double wRegVal = wReg_(dataset_.getWeights());
-                //const double tObj = std::abs(totalLoss) + wRegVal;
-                // progress output, printed every iteration
-                std::cout << '\r'
-                          << std::setw(6) << std::setfill(' ') << iteration_ << ':'
-                          << std::setw(8) << -1.0*totalLoss <<"  "<< std::flush;
-                // update the weights
-                const double wChange =updateWeights();
-                
-            }
-        }
-        weights_ = dataset_.getWeights();
-    }
-
-
-    template<class DATASET>
-    double SubgradientSSVM<DATASET>::updateWeights(){
-
-        const size_t nWeights = dataset_.getNumberOfWeights();
-
-        WeightsType p(nWeights);
-        WeightsType newWeights(nWeights);
-
-        if(para_.learningMode_ == Parameter::Batch){
-            for(size_t wi=0; wi<nWeights; ++wi){
-                p[wi]  = dataset_.getWeights().getWeight(wi);
-                p[wi] += para_.C_ * featureAcc_.getWeight(wi)/double(dataset_.getNumberOfModels());
-            }
-        }
-        else{
-            for(size_t wi=0; wi<nWeights; ++wi){
-                p[wi]  = dataset_.getWeights().getWeight(wi);
-                p[wi] += para_.C_ * featureAcc_.getWeight(wi);
-            }
-        }
-
-
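-        // plain subgradient step with the diminishing step size learningRate_/(t+1)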
-        double wChange = 0.0;
-
-        for(size_t wi=0; wi<nWeights; ++wi){
-            const double wOld = dataset_.getWeights().getWeight(wi);
-            const double wNew = wOld - (para_.learningRate_/double(iteration_+1))*p[wi];
-            wChange += std::pow(wOld-wNew,2);
-            newWeights[wi] = wNew;
-        }
-
-        weightAveraging_(newWeights);
-
-        weights_ = dataset_.getWeights();
-        return wChange;
-    }
-}
-}
-#endif
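For reference, the batch step assembled above is p = w + (C/nModels) * sum_i g_i followed by w <- w - (learningRate_/(t+1)) * p, i.e. a subgradient step on an L2-regularized structured hinge objective, where g_i is the feature statistic accumulated for model i; the result is then smoothed by WeightAveraging (deleted below). A minimal driver, mirroring the deleted test_subgradient_ssvm.cxx further down (DSS and INFCPLEX are the typedefs used there):

    opengm::learning::SubgradientSSVM<DSS>::Parameter para;
    para.maxIterations_ = 50;
    para.C_ = 100.0;
    para.learningRate_ = 0.1;
    opengm::learning::SubgradientSSVM<DSS> learner(dataset, para);

    INFCPLEX::Parameter infPara;
    infPara.integerConstraint_ = true;
    learner.learn<INFCPLEX>(infPara);    // inference runs on the loss-augmented models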
diff --git a/include/opengm/learning/weight_averaging.hxx b/include/opengm/learning/weight_averaging.hxx
deleted file mode 100644
index 815ab4b..0000000
--- a/include/opengm/learning/weight_averaging.hxx
+++ /dev/null
@@ -1,68 +0,0 @@
-#ifndef OPENGM_LEARNING_WEIGHT_AVERAGING_HXX
-#define OPENGM_LEARNING_WEIGHT_AVERAGING_HXX
-
-
-
-namespace opengm{
-namespace learning{
-
-
-    template<class T>
-    class WeightAveraging{
-    public:
-        WeightAveraging(Weights<T> & weights, int order=2)
-        :   weights_(&weights),
-            order_(order),
-            iteration_(1){
-        }
-        WeightAveraging()
-        :   weights_(NULL),
-            order_(2),
-            iteration_(1){
-        }
-        void setWeights(Weights<T> & weights){
-            weights_ = &weights;
-        }
-
-        template<class U>
-        void operator()(const Weights<U> & weights){
-            const T t = static_cast<T>(iteration_);
-            if(order_ == -1){
-                *weights_ = weights;
-            }
-            else if(order_ == 0){
-                throw opengm::RuntimeError("running average is not yet implemented");
-            }
-            else if(order_==1){
-                const T rho = 2.0 / (t + 2.0);
-                for(size_t i=0; i<weights_->size(); ++i){
-                    (*weights_)[i] =  (*weights_)[i]*(1.0 - rho) + weights[i]*rho;
-                }
-            }
-            else if(order_ == 2){
-                const T rho = 6.0 * (t+1.0) / ( (t+2.0)*(2.0*t + 3.0) );
-                for(size_t i=0; i<weights_->size(); ++i){
-                    (*weights_)[i] =  (*weights_)[i]*(1.0 - rho) + weights[i]*rho;
-                }
-            }
-            else{
-                throw opengm::RuntimeError("order must be -1,0,1 or 2");
-            }
-            ++iteration_;
-        }
-        const Weights<T> & weights()const{
-            return *weights_;
-        }
-    private:
-        Weights<T>  * weights_;
-        int order_;
-        size_t iteration_;
-    };
-
-
-
-}   // end namespace learning
-}   // end namespace opengm
-
-
-#endif /*OPENGM_LEARNING_WEIGHT_AVERAGING_HXX*/
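The rho schedules above are recursive forms of weighted running averages: order 1 with rho = 2/(t+2) weights the t-th iterate proportionally to t+1 (linear averaging), order 2 with rho = 6(t+1)/((t+2)(2t+3)) proportionally to (t+1)^2 (quadratic averaging). Spot check for order 1: at t = 1, rho = 2/3, so the stored average and the incoming iterate are combined with weights 1:2, exactly as a linear weighting demands. Order -1 simply keeps the last iterate, and order 0 (the uniform running mean) is unimplemented.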
diff --git a/src/unittest/CMakeLists.txt b/src/unittest/CMakeLists.txt
index bbf3411..27ac355 100644
--- a/src/unittest/CMakeLists.txt
+++ b/src/unittest/CMakeLists.txt
@@ -122,5 +122,4 @@ if(BUILD_TESTING)
    add_test(test-canonicalview ${CMAKE_CURRENT_BINARY_DIR}/test-canonicalview)
 
    add_subdirectory(inference)
-   add_subdirectory(learning)
 endif()
diff --git a/src/unittest/learning/CMakeLists.txt b/src/unittest/learning/CMakeLists.txt
deleted file mode 100644
index 9c46812..0000000
--- a/src/unittest/learning/CMakeLists.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-
-add_definitions(-DOPENGM_DEBUG)
-
-if(BUILD_TESTING)
-
-    add_executable(test-gridsearch-learner test_gridsearch_learner.cxx ${headers})
-    add_test(test-gridsearch-learner ${CMAKE_CURRENT_BINARY_DIR}/test-gridsearch-learner)
-
-    add_executable(test-maximum-likelihood-learner test_maximum_likelihood_learner.cxx ${headers})
-    add_test(test-maximum-likelihood-learner ${CMAKE_CURRENT_BINARY_DIR}/test-maximum-likelihood-learner)
-
-    if(WITH_HDF5)
-        add_executable(test-dataset-io test_dataset_io.cxx ${headers})
-        target_link_libraries(test-dataset-io ${HDF5_LIBRARIES})
-        add_test(test-dataset-io ${CMAKE_CURRENT_BINARY_DIR}/test-dataset-io)
-
-        add_executable(test-dataset test_dataset.cxx ${headers})
-        target_link_libraries(test-dataset ${HDF5_LIBRARIES})
-        add_test(test-dataset ${CMAKE_CURRENT_BINARY_DIR}/test-dataset)
-    endif()
-
-    if(WITH_TRWS)
-        if(WITH_GUROBI)
-            ADD_EXECUTABLE(test-learning test_learning.cxx ${headers})
-            target_link_libraries(test-learning ${GUROBI_CXX_LIBRARY} ${GUROBI_LIBRARY})
-            target_link_libraries(test-learning external-library-trws)
-            add_test(test-learning ${CMAKE_CURRENT_BINARY_DIR}/test-learning) 
-        else()
-            if(WITH_CPLEX)
-                ADD_EXECUTABLE(test-learning test_learning.cxx ${headers})
-                target_link_libraries(test-learning ${CPLEX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
-                target_link_libraries(test-learning external-library-trws)
-                add_test(test-learning ${CMAKE_CURRENT_BINARY_DIR}/test-learning)
-            endif()
-        endif()
-    endif()
-
-
-    if(WITH_CPLEX)
-        ADD_EXECUTABLE(test-subgradient-ssvm test_subgradient_ssvm.cxx ${headers})
-        if(OPENMP_FOUND)
-            SET_TARGET_PROPERTIES(test-subgradient-ssvm PROPERTIES COMPILE_FLAGS "${OpenMP_CXX_FLAGS}")
-            SET_TARGET_PROPERTIES(test-subgradient-ssvm PROPERTIES LINK_FLAGS "${OpenMP_CXX_FLAGS}")
-        endif()
-        target_link_libraries(test-subgradient-ssvm ${CPLEX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
-        add_test(test-subgradient-ssvm ${CMAKE_CURRENT_BINARY_DIR}/test-subgradient-ssvm)
-    endif()
-
-  add_executable(test-generalized-hammingloss test_generalized_hammingloss.cxx ${headers})
-  add_test(test-generalized-hammingloss ${CMAKE_CURRENT_BINARY_DIR}/test-generalized-hammingloss)
-endif()
diff --git a/src/unittest/learning/test_dataset.cxx b/src/unittest/learning/test_dataset.cxx
deleted file mode 100644
index b84e2ec..0000000
--- a/src/unittest/learning/test_dataset.cxx
+++ /dev/null
@@ -1,150 +0,0 @@
-#include <vector>
-
-#include <opengm/functions/explicit_function.hxx>
-#include <opengm/unittests/test.hxx>
-#include <opengm/graphicalmodel/graphicalmodel.hxx>
-#include <opengm/operations/adder.hxx>
-#include <opengm/operations/minimizer.hxx>
-#include <opengm/inference/icm.hxx>
-#include <opengm/utilities/metaprogramming.hxx>
-
-#include <opengm/functions/learnable/lpotts.hxx>
-#include <opengm/functions/learnable/lweightedsum_of_functions.hxx>
-//#include <opengm/learning/dataset/testdataset.hxx>
-//#include <opengm/learning/dataset/testdataset2.hxx>
-#include <opengm/learning/dataset/dataset_io.hxx>
-#include <opengm/learning/dataset/dataset.hxx>
-#include <opengm/learning/dataset/testdatasets.hxx>
-#include <opengm/learning/loss/noloss.hxx>
-#include <opengm/learning/loss/hammingloss.hxx>
-
-
-//*************************************
-typedef double ValueType;
-typedef size_t IndexType;
-typedef size_t LabelType; 
-typedef opengm::meta::TypeListGenerator<opengm::ExplicitFunction<ValueType,IndexType,LabelType>, opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType>, opengm::functions::learnable::LWeightedSumOfFunctions<ValueType,IndexType,LabelType> >::type FunctionListType;
-typedef opengm::GraphicalModel<ValueType,opengm::Adder, FunctionListType, opengm::DiscreteSpace<IndexType,LabelType> > GM; 
-//typedef opengm::datasets::TestDataset<GM>  DS1;
-//typedef opengm::datasets::Dataset<GM>      DS;
-
-typedef opengm::learning::HammingLoss     LOSS;
-//typedef opengm::learning::NoLoss                 LOSS;
-typedef opengm::datasets::TestDataset1<GM,LOSS>  DS0;
-typedef opengm::datasets::TestDataset1<GM,LOSS>  DS1;
-typedef opengm::datasets::TestDataset2<GM,LOSS>  DS2;
-typedef opengm::datasets::Dataset<GM,LOSS>       DS;
-
-//*************************************
-
-template<class DatasetType>
-struct DatasetTest {
-
-   DatasetType& dataset_;
-
-   DatasetTest(DatasetType& data): dataset_(data) {}
-
-   void testInitialization() {
-      std::cout << "Initialize Model:" << std::endl;
-      // create a new dataset
-      DatasetType dataset;
-   }
-
-   void callModelFunctions(){
-
-         std::cout << "calling Model functions:" << std::endl;
-         std::cout << "\tlocking all available Models" << std::endl;
-
-         for(size_t i = 0; i<dataset_.getNumberOfModels();i++)
-         {
-            dataset_.lockModel(i);
-            dataset_.unlockModel(i);
-         }
-
-         std::cout << "\tgetModel with and without loss" << std::endl;
-         for(size_t i = 0; i<dataset_.getNumberOfModels();i++)
-         {
-            OPENGM_TEST(dataset_.getModel(i).numberOfVariables() == dataset_.getModelWithLoss(i).numberOfVariables());
-            OPENGM_TEST(dataset_.getModel(i).numberOfFactors() <=  dataset_.getModelWithLoss(i).numberOfFactors());
-         }
-
-         std::cout << "\tgetGT" << std::endl;
-         for(size_t i = 0; i<dataset_.getNumberOfModels();i++)
-         {
-            std::cout << dataset_.getGT(i).size() << std::endl;
-         }
-
-      }
-
-   void getInfo(){
-         std::cout << "Info of data size:" << std::endl;
-         std::cout << "\tnumberOfWeights\t" << dataset_.getNumberOfWeights() << std::endl;
-         std::cout << "\tnumberOfModels\t" << dataset_.getNumberOfModels() << std::endl;
-
-         opengm::learning::Weights<ValueType> weights = dataset_.getWeights();
-         std::cout << "Beginning of weight vector: ";
-         for(size_t i = 0; i<std::min(dataset_.getNumberOfWeights(),size_t(10));i++)
-         {
-            std::cout << weights[i] << " ";
-         }
-         std::cout << std::endl;
-   }
-
-   void run() {
-      this->testInitialization();
-      this->getInfo();
-      this->callModelFunctions();
-   }
-};
-
-
-int main() {
-   std::cout << " Includes are fine :-) " << std::endl; 
-  
-   {
-      // initialize your data here
-      // you may need to load it from a file
-      DS data;
-  
-      std::cout << "Start test DS" <<std::endl;
-      //run tests on dataset
-      DatasetTest<DS >t(data);
-      t.run();
-   } 
-
-   {
-      // initialize your data here
-      // you may need to load it from a file
-      DS0 data;
-
-      std::cout << "Start test DS0" <<std::endl;
-      //run tests on dataset
-      DatasetTest<DS0 >t(data);
-      t.run();
-
-   }
-   {
-      // initialize your data here
-      // you may need to load it from a file
-      DS1 data;
-
-      std::cout << "Start test DS1" <<std::endl;
-      //run tests on dataset
-      DatasetTest<DS1 >t(data);
-      t.run();
-
-   }
-   {
-      // initialize your data here
-      // you may need to load it from a file
-      DS2 data;
-
-      std::cout << "Start test DS2" <<std::endl;
-      //run tests on dataset
-      DatasetTest<DS2 >t(data);
-      t.run();
-
-   }
-
-
-}
diff --git a/src/unittest/learning/test_dataset_io.cxx b/src/unittest/learning/test_dataset_io.cxx
deleted file mode 100644
index 65d98d4..0000000
--- a/src/unittest/learning/test_dataset_io.cxx
+++ /dev/null
@@ -1,101 +0,0 @@
-#include <vector>
-
-#include <opengm/functions/explicit_function.hxx>
-#include <opengm/unittests/test.hxx>
-#include <opengm/graphicalmodel/graphicalmodel.hxx>
-#include <opengm/operations/adder.hxx>
-#include <opengm/operations/minimizer.hxx>
-#include <opengm/inference/icm.hxx>
-#include <opengm/utilities/metaprogramming.hxx>
-
-#include <opengm/functions/learnable/lpotts.hxx>
-#include <opengm/functions/learnable/lweightedsum_of_functions.hxx>
-//#include <opengm/learning/dataset/testdataset.hxx>
-//#include <opengm/learning/dataset/testdataset2.hxx>
-#include <opengm/learning/dataset/dataset_io.hxx>
-#include <opengm/learning/dataset/dataset.hxx>
-#include <opengm/learning/dataset/testdatasets.hxx>
-#include <opengm/learning/loss/noloss.hxx>
-#include <opengm/learning/loss/hammingloss.hxx>
-#include <opengm/learning/loss/generalized-hammingloss.hxx>
-
-
-//*************************************
-typedef double ValueType;
-typedef size_t IndexType;
-typedef size_t LabelType; 
-typedef opengm::meta::TypeListGenerator<opengm::ExplicitFunction<ValueType,IndexType,LabelType>, opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType>, opengm::functions::learnable::LWeightedSumOfFunctions<ValueType,IndexType,LabelType> >::type FunctionListType;
-typedef opengm::GraphicalModel<ValueType,opengm::Adder, FunctionListType, opengm::DiscreteSpace<IndexType,LabelType> > GM; 
-typedef opengm::learning::NoLoss                 LOSS1;
-typedef opengm::learning::HammingLoss            LOSS2;
-typedef opengm::learning::GeneralizedHammingLoss LOSS3;
-typedef opengm::datasets::TestDataset1<GM,LOSS1>  DS11;
-typedef opengm::datasets::TestDataset2<GM,LOSS1>  DS21;
-typedef opengm::datasets::TestDataset1<GM,LOSS2>  DS12;
-typedef opengm::datasets::TestDataset2<GM,LOSS2>  DS22;
-typedef opengm::datasets::TestDataset1<GM,LOSS3>  DS13;
-typedef opengm::datasets::Dataset<GM,LOSS1>       DS1;
-typedef opengm::datasets::Dataset<GM,LOSS2>       DS2;
-typedef opengm::datasets::Dataset<GM,LOSS3>       DS3;
-
-//*************************************
-
-
-int main() {
-   std::cout << " Includes are fine :-) " << std::endl; 
-  
-   {
-      DS11 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::datasets::DatasetSerialization::save(dataset,"./","dataset11_");
-      std::cout <<"done!" <<std::endl;
-   }
-   {
-      DS12 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::datasets::DatasetSerialization::save(dataset,"./","dataset12_");
-      std::cout <<"done!" <<std::endl;
-   }
-   {
-      DS21 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::datasets::DatasetSerialization::save(dataset,"./","dataset21_");
-      std::cout <<"done!" <<std::endl;
-   }
-   {
-      DS22 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::datasets::DatasetSerialization::save(dataset,"./","dataset22_");
-      std::cout <<"done!" <<std::endl;
-   }
-   {
-      DS13 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::datasets::DatasetSerialization::save(dataset,"./","dataset13_");
-      std::cout <<"done!" <<std::endl;
-   }
-
-   #ifndef CI
-   {
-      DS1 ds;
-      opengm::datasets::DatasetSerialization::loadAll("./","dataset11_",ds);
-   }
-   {
-      DS1 ds;
-      opengm::datasets::DatasetSerialization::loadAll("./","dataset21_",ds);
-   }
-   {
-      DS2 ds;
-      opengm::datasets::DatasetSerialization::loadAll("./","dataset12_",ds);
-   }
-   {
-      DS2 ds;
-      opengm::datasets::DatasetSerialization::loadAll("./","dataset22_",ds);
-   }
-   {
-      DS3 ds;
-      opengm::datasets::DatasetSerialization::loadAll("./","dataset13_",ds);
-   }
-   #endif
-   std::cout << "test successful." << std::endl;
-}
diff --git a/src/unittest/learning/test_generalized_hammingloss.cxx b/src/unittest/learning/test_generalized_hammingloss.cxx
deleted file mode 100644
index 743a4e3..0000000
--- a/src/unittest/learning/test_generalized_hammingloss.cxx
+++ /dev/null
@@ -1,65 +0,0 @@
-#include <vector>
-#include <iostream>
-
-#include <opengm/learning/loss/generalized-hammingloss.hxx>
-#include <opengm/graphicalmodel/graphicalmodel.hxx>
-#include <opengm/graphicalmodel/graphicalmodel_factor.hxx>
-
-//*************************************
-typedef double ValueType;
-typedef size_t IndexType;
-typedef size_t LabelType;
-typedef opengm::meta::TypeListGenerator<opengm::ExplicitFunction<ValueType,IndexType,LabelType> >::type FunctionListType;
-typedef opengm::GraphicalModel<ValueType,opengm::Adder, FunctionListType, opengm::DiscreteSpace<IndexType,LabelType> > GM;
-
-//*************************************
-
-
-int main() {
-
-   opengm::learning::GeneralizedHammingLoss::Parameter param;
-   param.labelLossMultiplier_.push_back(2.0);
-   param.labelLossMultiplier_.push_back(1.0);
-   param.labelLossMultiplier_.push_back(0.5);
-
-   param.nodeLossMultiplier_.push_back(5.0);
-   param.nodeLossMultiplier_.push_back(6.0);
-   param.nodeLossMultiplier_.push_back(7.0);
-   param.nodeLossMultiplier_.push_back(8.0);
-
-   // create loss
-   opengm::learning::GeneralizedHammingLoss loss(param);
-
-   // evaluate for a test point
-   std::vector<size_t> labels;
-   labels.push_back(0);
-   labels.push_back(1);
-   labels.push_back(2);
-   labels.push_back(2);
-
-   std::vector<size_t> ground_truth;
-   ground_truth.push_back(1);
-   ground_truth.push_back(1);
-   ground_truth.push_back(1);
-   ground_truth.push_back(1);
-
-
-   // add loss to a model and evaluate for a given labeling
-   GM gm;
-   size_t numberOfLabels = 3;
-   gm.addVariable(numberOfLabels);
-   gm.addVariable(numberOfLabels);
-   gm.addVariable(numberOfLabels);
-   gm.addVariable(numberOfLabels);
-   OPENGM_ASSERT_OP(loss.loss(gm, labels.begin(), labels.end(), ground_truth.begin(), ground_truth.end()), ==, 17.5);
-
-   // add a constant unary factor on variable 1 (i.e. the second node)
-   opengm::ExplicitFunction<GM::ValueType,GM::IndexType,GM::LabelType> f(&numberOfLabels, &(numberOfLabels)+1, 2.0);
-   size_t variableIndex = 1;
-   gm.addFactor(gm.addFunction(f), &variableIndex, &variableIndex+1);
-   OPENGM_ASSERT_OP(gm.evaluate(labels.begin()), ==, 2.0);
-
-   // loss augmented model:
-   loss.addLoss(gm, ground_truth.begin());
-   OPENGM_ASSERT_OP(gm.evaluate(labels.begin()), ==, -15.5);
-}
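The expected values decompose as follows: the loss, as encoded here, charges each mislabeled variable v the product nodeLossMultiplier_[v] * labelLossMultiplier_[label[v]]. Variables 0, 2 and 3 disagree with the ground truth, giving 5.0*2.0 + 7.0*0.5 + 8.0*0.5 = 17.5. The unary factor added afterwards contributes 2.0 for every labeling, and addLoss() enters the loss with negative sign into the minimization objective, hence 2.0 - 17.5 = -15.5.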
diff --git a/src/unittest/learning/test_gridsearch_learner.cxx b/src/unittest/learning/test_gridsearch_learner.cxx
deleted file mode 100644
index 3684cc2..0000000
--- a/src/unittest/learning/test_gridsearch_learner.cxx
+++ /dev/null
@@ -1,90 +0,0 @@
-#include <vector>
-
-#include <opengm/functions/explicit_function.hxx>
-#include <opengm/unittests/test.hxx>
-#include <opengm/graphicalmodel/graphicalmodel.hxx>
-#include <opengm/operations/adder.hxx>
-#include <opengm/operations/minimizer.hxx>
-#include <opengm/inference/icm.hxx>
-#include <opengm/utilities/metaprogramming.hxx>
-
-#include <opengm/functions/learnable/lpotts.hxx>
-#include <opengm/functions/learnable/lweightedsum_of_functions.hxx>
-#include <opengm/learning/gridsearch-learning.hxx>
-#include <opengm/learning/loss/hammingloss.hxx>
-//#include <opengm/learning/dataset/testdataset.hxx>
-//#include <opengm/learning/dataset/testdataset2.hxx>
-#include <opengm/learning/dataset/testdatasets.hxx>
-
-
-//*************************************
-typedef double ValueType;
-typedef size_t IndexType;
-typedef size_t LabelType; 
-typedef opengm::meta::TypeListGenerator<opengm::ExplicitFunction<ValueType,IndexType,LabelType>, opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType>, opengm::functions::learnable::LWeightedSumOfFunctions<ValueType,IndexType,LabelType> >::type FunctionListType;
-typedef opengm::GraphicalModel<ValueType,opengm::Adder, FunctionListType, opengm::DiscreteSpace<IndexType,LabelType> > GM; 
-//typedef opengm::datasets::TestDataset<GM>  DS;
-//typedef opengm::datasets::TestDataset2<GM> DS2;
-typedef opengm::learning::HammingLoss     LOSS;
-typedef opengm::ICM<GM,opengm::Minimizer> INF;
-typedef opengm::datasets::TestDataset1<GM,LOSS> DS1;
-typedef opengm::datasets::TestDataset2<GM,LOSS> DS2;
-
-//*************************************
-
-
-int main() {
-   std::cout << " Includes are fine :-) " << std::endl; 
-   /* 
-   {
-      DS dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      
-      
-      opengm::learning::GridSearchLearner<DS,LOSS>::Parameter para;
-      para.parameterUpperbound_.resize(1,1);
-      para.parameterLowerbound_.resize(1,0);
-      para.testingPoints_.resize(1,10);
-      opengm::learning::GridSearchLearner<DS,LOSS> learner(dataset,para);
-      
-      
-      INF::Parameter infPara;
-      learner.learn<INF>(infPara);
-      
-   } 
-   */
-   {
-      DS1 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      
-      
-      opengm::learning::GridSearchLearner<DS1>::Parameter para;
-      para.parameterUpperbound_.resize(1,1);
-      para.parameterLowerbound_.resize(1,0);
-      para.testingPoints_.resize(1,10);
-      opengm::learning::GridSearchLearner<DS1> learner(dataset,para);
-      
-      
-      INF::Parameter infPara;
-      learner.learn<INF>(infPara);
-      
-   }
-
-   {
-      DS2 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      
-      
-      opengm::learning::GridSearchLearner<DS2>::Parameter para;
-      para.parameterUpperbound_.resize(3,1);
-      para.parameterLowerbound_.resize(3,0);
-      para.testingPoints_.resize(3,5);
-      opengm::learning::GridSearchLearner<DS2> learner(dataset,para);
-      
-      
-      INF::Parameter infPara;
-      learner.learn<INF>(infPara);
-   }
-
-
-}
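Grid sizes for these runs: the DS1 case samples one weight at 10 points in [0,1], the DS2 case three weights at 5 points each, i.e. 5^3 = 125 weight combinations, each presumably scored by running ICM on every model in the dataset.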
diff --git a/src/unittest/learning/test_learning.cxx b/src/unittest/learning/test_learning.cxx
deleted file mode 100644
index 289dfdc..0000000
--- a/src/unittest/learning/test_learning.cxx
+++ /dev/null
@@ -1,233 +0,0 @@
-#include <vector>
-
-#include <opengm/functions/explicit_function.hxx>
-#include <opengm/unittests/test.hxx>
-#include <opengm/graphicalmodel/graphicalmodel.hxx>
-#include <opengm/operations/adder.hxx>
-#include <opengm/operations/minimizer.hxx>
-#include <opengm/inference/external/trws.hxx>
-#include <opengm/utilities/metaprogramming.hxx>
-
-#ifdef WITH_GUROBI
-#include <opengm/inference/lpgurobi.hxx>
-#else
-#include <opengm/inference/lpcplex.hxx>
-#endif
-
-
-#include <opengm/functions/learnable/lpotts.hxx>
-#include <opengm/functions/learnable/lweightedsum_of_functions.hxx>
-#include <opengm/learning/struct-max-margin.hxx>
-#include <opengm/learning/loss/hammingloss.hxx>
-//#include <opengm/learning/dataset/testdataset.hxx>
-//#include <opengm/learning/dataset/testdataset2.hxx>
-#include <opengm/learning/dataset/testdatasets.hxx>
-#include <opengm/learning/dataset/editabledataset.hxx>
-
-
-//*************************************
-typedef double ValueType;
-typedef size_t IndexType;
-typedef size_t LabelType; 
-typedef opengm::meta::TypeListGenerator<opengm::ExplicitFunction<ValueType,IndexType,LabelType>, opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType>, opengm::functions::learnable::LWeightedSumOfFunctions<ValueType,IndexType,LabelType> >::type FunctionListType;
-typedef opengm::GraphicalModel<ValueType,opengm::Adder, FunctionListType, opengm::DiscreteSpace<IndexType,LabelType> > GM; 
-typedef opengm::learning::HammingLoss     LOSS;
-
-#ifdef WITH_GUROBI
-typedef opengm::LPGurobi<GM,opengm::Minimizer> INF;
-#else
-typedef opengm::LPCplex<GM,opengm::Minimizer> INF;
-#endif
-typedef opengm::datasets::EditableTestDataset<GM,LOSS> EDS;
-typedef opengm::datasets::TestDataset1<GM,LOSS> DS1;
-typedef opengm::datasets::TestDataset2<GM,LOSS> DS2;
-typedef opengm::datasets::TestDatasetSimple<GM,LOSS> DSS;
-
-//*************************************
-
-
-int main() {
-   std::cout << " Includes are fine :-) " << std::endl; 
-
-   {
-	  DSS dataset(5);
-	  std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-	  
-	  
-	  opengm::learning::StructMaxMargin<DSS>::Parameter para;
-	  opengm::learning::StructMaxMargin<DSS> learner(dataset,para);
-	  
-	  
-	  INF::Parameter infPara;
-	  infPara.integerConstraint_ = true;
-	  learner.learn<INF>(infPara); 
-		  const DSS::Weights& weights = learner.getWeights();
-		  std::cout <<"Weights: ";
-		  for (size_t i=0; i<weights.numberOfWeights(); ++i)
-			 std::cout << weights[i] <<" ";
-		  std::cout <<std::endl;
-   }
-
-   {
-	  DS1 dataset(4);
-	  std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-	  
-	  
-      opengm::learning::StructMaxMargin<DS1>::Parameter para;
-      opengm::learning::StructMaxMargin<DS1> learner(dataset,para);
-	  
-	  
-	  INF::Parameter infPara;
-	  infPara.integerConstraint_ = true;
-	  learner.learn<INF>(infPara);
-          const DS1::Weights& weights = learner.getWeights();
-          std::cout <<"Weights: ";
-          for (size_t i=0; i<weights.numberOfWeights(); ++i)
-             std::cout << weights[i] <<" ";
-          std::cout <<std::endl;
-	  
-   }
-
-   {
-	  DS2 dataset(4);
-	  std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-	  
-	  
-	  opengm::learning::StructMaxMargin<DS2>::Parameter para;
-	  para.optimizerParameter_.lambda = 1000.0;
-	  opengm::learning::StructMaxMargin<DS2> learner(dataset,para);
-	  
-	  
-	  INF::Parameter infPara;
-	  infPara.integerConstraint_ = true;
-	  learner.learn<INF>(infPara);
-		  const DS2::Weights& weights = learner.getWeights();
-		  std::cout <<"Weights: ";
-		  for (size_t i=0; i<weights.numberOfWeights(); ++i)
-			 std::cout << weights[i] <<" ";
-		  std::cout <<std::endl;
-   }
-
-/* Does this make sense?!?
-   {
-        // create editable dataset
-        EDS learningdataset;
-
-        INF::Parameter infPara;
-        infPara.integerConstraint_ = true;
-
-        std::vector< std::vector< LabelType > >GTSolutionVector;
-
-        std::cout << "inference with fixed, arbitrary weights to generate solution" << std::endl;
-
-        EDS::Weights learningWeightVector = learningdataset.getWeights();
-        EDS::Weights randomWeights(learningdataset.getNumberOfWeights());
-
-
-        // opengm::learning::StructMaxMargin<EDS>::Parameter para0;
-        // para0.optimizerParameter_.lambda = 1;
-        // opengm::learning::StructMaxMargin<EDS> learner0(learningdataset,para0);
-
-        // // // learn
-        // learner0.learn<INF>(infPara);
-
-        // std::srand(std::time(0));
-        for (int i = 0; i < learningWeightVector.numberOfWeights(); ++i)
-        {
-            randomWeights[i] = 1.0;
-
-            std::cout << randomWeights[i] << " --->  "  << learningWeightVector[i] << std::endl;
-            learningWeightVector.setWeight(i, randomWeights[i]);//double(std::rand()) / RAND_MAX * 100);
-        }
-
-        for (size_t modelIndex = 0; modelIndex < learningdataset.getNumberOfModels(); modelIndex++)
-        {
-
-            std::cout << "starting inference on GM " << modelIndex << std::endl;
-
-            // INF inference(learningdataset.getModel(modelIndex), infPara);
-            // inference.infer();
-            // std::vector< LabelType > sol1;
-            
-            // OPENGM_TEST(inference.arg(sol1) == opengm::NORMAL);
-
-            INF solver(learningdataset.getModel(modelIndex),infPara);
-            solver.infer();
-            std::vector< LabelType > sol1;
-            OPENGM_TEST(solver.arg(sol1) == opengm::NORMAL);
-
-
-            std::cout << "add solution "<< modelIndex <<" to new dataset" << std::endl;
-            learningdataset.setGT(modelIndex,sol1);
-
-            for (size_t j = 0; j < sol1.size(); j++)
-            {
-              std::cout << sol1[j];
-            }
-            std::cout << std::endl;
-            GTSolutionVector.push_back(sol1);
-        }
-
-
-        std::cout << "learn weights (without regularization)" << std::endl;
-
-        std::cout << "weight vector size " << learningdataset.getNumberOfWeights() << std::endl;
-        // Parameter
-        opengm::learning::StructMaxMargin<EDS>::Parameter para;
-        para.optimizerParameter_.lambda = 0.000000001;
-        opengm::learning::StructMaxMargin<EDS> learner(learningdataset,para);
-
-        // learn
-        learner.learn<INF>(infPara);
-
-        // get the result
-        const EDS::Weights &learnedParameters = learner.getWeights();
-        std::cout << learnedParameters.numberOfWeights() << std::endl;
-        std::cout << "set learnedParameters as new Weights: ";
-        for (size_t i = 0; i < learnedParameters.numberOfWeights(); ++i)
-        {
-            std::cout << learnedParameters[i] << " ";
-            learningWeightVector.setWeight(i, learnedParameters[i]);
-        }
-        std::cout << std::endl;
-
-        std::cout << "new weights: ";
-        for (int i = 0; i < learningWeightVector.numberOfWeights(); i++)
-        {
-            std::cout << learningWeightVector[i] << ", ";
-        }
-        std::cout << std::endl;
-
-
-        std::cout << "inference with new weights" << std::endl;
-        for (size_t modelIndex = 0; modelIndex < learningdataset.getNumberOfModels(); modelIndex++)
-        {
-            std::cout << "starting inference on GM " << modelIndex << " with learned weights" << std::endl;
-            INF solver(learningdataset.getModel(modelIndex),infPara);
-            solver.infer();
-            std::vector< LabelType > sol2;
-            OPENGM_TEST(solver.arg(sol2) == opengm::NORMAL);
-            //for (size_t j = 0; j < sol2.size(); j++)
-            //{
-            //std::cout << "sol2["<<j<<"]:" << sol2[j] << "   GTSolutionVector["<<modelIndex<<"]["<<j<<"]:" << GTSolutionVector[modelIndex][j] << std::endl; 
-            //  //!may not be true! OPENGM_TEST(sol2[j] == GTSolutionVector[modelIndex][j]);
-            //}
-            OPENGM_TEST( learningdataset.getModel(modelIndex).evaluate(sol2) ==  learningdataset.getModel(modelIndex).evaluate(GTSolutionVector[modelIndex]) );
-            // for (size_t j = 0; j < sol2.size(); j++)
-            // {
-            //    std::cout << sol2[j]; 
-            //    // OPENGM_TEST(sol2[j] == GTSolutionVector[modelIndex][j]);
-            // }
-            // std::cout << std::endl<< std::endl;
-            // for (size_t j = 0; j < sol2.size(); j++)
-            // {
-            //    std::cout <<  GTSolutionVector[modelIndex][j]; 
-            //    // OPENGM_TEST(sol2[j] == GTSolutionVector[modelIndex][j]);
-            // }
-            std::cout << "all " << sol2.size() << " solutions are correct" << std::endl;
-        }
-
-    }
-*/
-}
-
diff --git a/src/unittest/learning/test_maximum_likelihood_learner.cxx b/src/unittest/learning/test_maximum_likelihood_learner.cxx
deleted file mode 100644
index ff976a4..0000000
--- a/src/unittest/learning/test_maximum_likelihood_learner.cxx
+++ /dev/null
@@ -1,126 +0,0 @@
-#include <vector>
-
-#include <opengm/functions/explicit_function.hxx>
-#include <opengm/unittests/test.hxx>
-#include <opengm/graphicalmodel/graphicalmodel.hxx>
-#include <opengm/operations/adder.hxx>
-#include <opengm/operations/minimizer.hxx>
-#include <opengm/inference/icm.hxx>
-#include <opengm/utilities/metaprogramming.hxx>
-
-#include <opengm/functions/learnable/lpotts.hxx>
-#include <opengm/functions/learnable/lweightedsum_of_functions.hxx>
-//#include <opengm/learning/maximum-likelihood-learning.hxx>
-#include <opengm/learning/maximum_likelihood_learning.hxx>
-#include <opengm/learning/loss/hammingloss.hxx>
-#include <opengm/learning/dataset/testdatasets.hxx>
-
-
-//*************************************
-
-typedef double ValueType;
-typedef size_t IndexType;
-typedef size_t LabelType; 
-typedef opengm::meta::TypeListGenerator<
-    opengm::ExplicitFunction<ValueType,IndexType,LabelType>,
-    opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType>,
-    opengm::functions::learnable::LWeightedSumOfFunctions<ValueType,IndexType,LabelType>
->::type FunctionListType;
-
-typedef opengm::GraphicalModel<
-    ValueType,opengm::Adder,
-    FunctionListType,
-    opengm::DiscreteSpace<IndexType,LabelType>
-> GM;
-
-typedef opengm::learning::HammingLoss     LOSS;
-typedef opengm::datasets::TestDataset0<GM,LOSS> DS0;
-typedef opengm::datasets::TestDataset1<GM,LOSS> DS1;
-typedef opengm::datasets::TestDataset2<GM,LOSS> DS2;
-typedef opengm::datasets::TestDatasetSimple<GM,LOSS> DSSimple;
-typedef opengm::ICM<GM,opengm::Minimizer> INF;
-
-typedef opengm::BeliefPropagationUpdateRules<GM, opengm::Integrator> UpdateRules;
-typedef opengm::MessagePassing<GM, opengm::Integrator, UpdateRules, opengm::MaxDistance> BeliefPropagation;
-//*************************************
-
-
-int main() {
-   std::cout << " Includes are fine :-) " << std::endl; 
-   /*
-   {
-      DS0 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::learning::MaximumLikelihoodLearner<DS0,LOSS>::Weight weight;
-      opengm::learning::MaximumLikelihoodLearner<DS0,LOSS> learner(dataset,weight);
-      INF::Parameter infWeight;
-      learner.learn<INF>(infWeight);
-
-   }
-*/
-
-
-   {
-      DS1 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::learning::MaximumLikelihoodLearner<DS1>::Parameter parameter;
-      parameter.maximumNumberOfIterations_ = 15;
-      parameter.gradientStepSize_ = 0.1;
-      parameter.weightStoppingCriteria_ = 0.001;
-      parameter.gradientStoppingCriteria_ = 0.00000000001;
-      parameter.infoFlag_ = true;
-      parameter.infoEveryStep_ = true;
-      parameter.weightRegularizer_ = 1.0;
-      parameter.beliefPropagationMaximumNumberOfIterations_ = 5;
-      parameter.beliefPropagationConvergenceBound_ = 0.0001;
-      parameter.beliefPropagationDamping_ = 0.5;
-      parameter.beliefPropagationTemperature_ = 0.3;
-      parameter.beliefPropagationIsAcyclic_ = opengm::Tribool(opengm::Tribool::Maybe);
-      opengm::learning::MaximumLikelihoodLearner<DS1> learner(dataset,parameter);
-
-      learner.learn();
-      
-   }
-
-   {
-      DS2 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::learning::MaximumLikelihoodLearner<DS2>::Parameter parameter;
-      parameter.maximumNumberOfIterations_ = 15;
-      parameter.gradientStepSize_ = 0.1;
-      parameter.weightStoppingCriteria_ = 0.001;
-      parameter.gradientStoppingCriteria_ = 0.00000000001;
-      parameter.infoFlag_ = true;
-      parameter.infoEveryStep_ = true;
-      parameter.weightRegularizer_ = 1.0;
-      parameter.beliefPropagationMaximumNumberOfIterations_ = 5;
-      parameter.beliefPropagationConvergenceBound_ = 0.0001;
-      parameter.beliefPropagationDamping_ = 0.5;
-      parameter.beliefPropagationTemperature_ = 0.3;
-      parameter.beliefPropagationIsAcyclic_ = opengm::Tribool(opengm::Tribool::Maybe);
-      opengm::learning::MaximumLikelihoodLearner<DS2> learner(dataset,parameter);
-
-      learner.learn();
-      
-   }
-/*
-
-   {
-      DS2 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::learning::MaximumLikelihoodLearner<DS2,LOSS>::Weight weight;
-      opengm::learning::MaximumLikelihoodLearner<DS2,LOSS> learner(dataset,weight);
-      INF::Parameter infWeight;
-      learner.learn<INF>(infWeight);
-   }
-
-/*
-   {
-      DSSimple dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::learning::MaximumLikelihoodLearner<DSSimple,LOSS>::Weight weight;
-      opengm::learning::MaximumLikelihoodLearner<DSSimple,LOSS> learner(dataset,weight);
-      INF::Parameter infWeight;
-      learner.learn<INF>(infWeight);
-   }
-*/
-}
diff --git a/src/unittest/learning/test_subgradient_ssvm.cxx b/src/unittest/learning/test_subgradient_ssvm.cxx
deleted file mode 100644
index fd009a0..0000000
--- a/src/unittest/learning/test_subgradient_ssvm.cxx
+++ /dev/null
@@ -1,238 +0,0 @@
-#include <vector>
-
-#include <opengm/functions/explicit_function.hxx>
-#include <opengm/unittests/test.hxx>
-#include <opengm/graphicalmodel/graphicalmodel.hxx>
-#include <opengm/operations/adder.hxx>
-#include <opengm/operations/minimizer.hxx>
-#include <opengm/utilities/metaprogramming.hxx>
-
-#include <opengm/inference/lpcplex.hxx>
-#include <opengm/inference/multicut.hxx>
-#include <opengm/inference/external/trws.hxx>
-
-
-
-#include <opengm/functions/learnable/lpotts.hxx>
-#include <opengm/functions/learnable/lweightedsum_of_functions.hxx>
-#include <opengm/learning/subgradient_ssvm.hxx>
-#include <opengm/learning/loss/hammingloss.hxx>
-//#include <opengm/learning/dataset/testdataset.hxx>
-//#include <opengm/learning/dataset/testdataset2.hxx>
-#include <opengm/learning/dataset/testdatasets.hxx>
-#include <opengm/learning/dataset/editabledataset.hxx>
-
-
-//*************************************
-typedef double ValueType;
-typedef size_t IndexType;
-typedef size_t LabelType; 
-typedef opengm::meta::TypeListGenerator<opengm::ExplicitFunction<ValueType,IndexType,LabelType>, opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType>, opengm::functions::learnable::LWeightedSumOfFunctions<ValueType,IndexType,LabelType> >::type FunctionListType;
-typedef opengm::GraphicalModel<ValueType,opengm::Adder, FunctionListType, opengm::DiscreteSpace<IndexType,LabelType> > GM; 
-typedef opengm::learning::HammingLoss     LOSS;
-
-typedef opengm::Multicut<GM,opengm::Minimizer> Multicut;
-typedef opengm::LPCplex<GM,opengm::Minimizer> INFCPLEX;
-typedef opengm::external::TRWS<GM> INFTRWS;
-
-typedef opengm::datasets::EditableTestDataset<GM,LOSS> EDS;
-typedef opengm::datasets::TestDataset1<GM,LOSS> DS1;
-typedef opengm::datasets::TestDataset2<GM,LOSS> DS2;
-typedef opengm::datasets::TestDatasetSimple<GM,LOSS> DSS;
-
-//*************************************
-
-
-int main() {
-   {
-      DSS dataset(5);
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      
-      
-      opengm::learning::SubgradientSSVM<DSS>::Parameter para;
-      para.maxIterations_ = 50;
-      para.C_ = 100.0;
-      para.learningRate_ = 0.1;
-      opengm::learning::SubgradientSSVM<DSS> learner(dataset,para);
-      
-      
-      INFCPLEX::Parameter infPara;
-      infPara.integerConstraint_ = true;
-      learner.learn<INFCPLEX>(infPara); 
-          const DSS::Weights& weights = learner.getWeights();
-          std::cout <<"Weights: ";
-          for (size_t i=0; i<weights.numberOfWeights(); ++i)
-             std::cout << weights[i] <<" ";
-          std::cout <<std::endl;
-   }
-
-   {
-      DS1 dataset(4);
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      
-      
-      opengm::learning::SubgradientSSVM<DS1>::Parameter para;
-      para.maxIterations_ = 10;
-      para.C_ = 10.0;
-      para.learningRate_ = 0.01;
-
-      opengm::learning::SubgradientSSVM<DS1> learner(dataset,para);
-      
-      
-      INFTRWS::Parameter infPara;
-      //infPara.integerConstraint_ = true;
-      learner.learn<INFTRWS>(infPara);
-      const DS1::Weights& weights = learner.getWeights();
-      std::cout <<"Weights: ";
-      for (size_t i=0; i<weights.numberOfWeights(); ++i)
-         std::cout << weights[i] <<" ";
-      std::cout <<std::endl;
-      
-   }
-
-   {
-      DS2 dataset(4);
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      
-      
-      opengm::learning::SubgradientSSVM<DS2>::Parameter para;
-      para.maxIterations_ = 10;
-      para.C_ = 10.0;
-      para.learningRate_ = 0.01;
-      opengm::learning::SubgradientSSVM<DS2> learner(dataset,para);
-      
-      
-      INFTRWS::Parameter infPara;
-      learner.learn<INFTRWS>(infPara);
-      const DS2::Weights& weights = learner.getWeights();
-      std::cout << "Weights: ";
-      for (size_t i = 0; i < weights.numberOfWeights(); ++i)
-         std::cout << weights[i] << " ";
-      std::cout << std::endl;
-   }
-
-/* Disabled round-trip test: generate ground truth by inference with fixed
-   weights, learn weights from that ground truth, and check that inference
-   with the learned weights reproduces it exactly.
-   {
-        // create editable dataset
-        EDS learningdataset;
-
-        INFTRWS::Parameter infPara;
-
-
-        std::vector< std::vector< LabelType > >GTSolutionVector;
-
-        std::cout << "inference with fixed, arbitrary weights to generate solution" << std::endl;
-
-        EDS::Weights learningWeightVector = learningdataset.getWeights();
-        EDS::Weights randomWeights(learningdataset.getNumberOfWeights());
-
-
-        // Fix every weight to 1.0; a random initialization (e.g. via
-        // std::rand) would work as well.
-        for (size_t i = 0; i < learningWeightVector.numberOfWeights(); ++i)
-        {
-            randomWeights[i] = 1.0;
-            std::cout << randomWeights[i] << " --->  " << learningWeightVector[i] << std::endl;
-            learningWeightVector.setWeight(i, randomWeights[i]);
-        }
-
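-        // Generate ground truth: run TRWS on every model with the fixed
-        // weights and store the resulting labelings.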
-        for (size_t modelIndex = 0; modelIndex < learningdataset.getNumberOfModels(); modelIndex++)
-        {
-
-            std::cout << "starting inference on GM " << modelIndex << std::endl;
-
-            INFTRWS solver(learningdataset.getModel(modelIndex), infPara);
-            solver.infer();
-            std::vector< LabelType > sol1;
-            OPENGM_TEST(solver.arg(sol1) == opengm::NORMAL);
-
-
-            std::cout << "add solution "<< modelIndex <<" to new dataset" << std::endl;
-            learningdataset.setGT(modelIndex,sol1);
-
-            for (size_t j = 0; j < sol1.size(); j++)
-            {
-              std::cout << sol1[j];
-            }
-            std::cout << std::endl;
-            GTSolutionVector.push_back(sol1);
-        }
-
-
-        std::cout << "learn weights (without regularization)" << std::endl;
-
-        std::cout << "weight vector size " << learningdataset.getNumberOfWeights() << std::endl;
-        // Parameter
-        opengm::learning::SubgradientSSVM<EDS>::Parameter para;
-          para.maxIterations_ = 500;
-          para.C_ = 10000.0;
-          para.learningRate_ = 0.1;
-        opengm::learning::SubgradientSSVM<EDS> learner(learningdataset,para);
-
-        // learn
-        learner.learn<INFTRWS>(infPara);
-
-        // get the result
-        const EDS::Weights &learnedParameters = learner.getWeights();
-        std::cout << learnedParameters.numberOfWeights() << std::endl;
-        std::cout << "set learnedParameters as new Weights: ";
-        for (size_t i = 0; i < learnedParameters.numberOfWeights(); ++i)
-        {
-            std::cout << learnedParameters[i] << " ";
-            learningWeightVector.setWeight(i, learnedParameters[i]);
-        }
-        std::cout << std::endl;
-
-        std::cout << "new weights: ";
-        for (int i = 0; i < learningWeightVector.numberOfWeights(); i++)
-        {
-            std::cout << learningWeightVector[i] << ", ";
-        }
-        std::cout << std::endl;
-
-
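-        // Verification: with the learned weights, inference must reproduce
-        // the stored ground truth on every model.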
-        std::cout << "inference with new weights" << std::endl;
-        for (size_t modelIndex = 0; modelIndex < learningdataset.getNumberOfModels(); modelIndex++)
-        {
-            std::cout << "starting inference on GM " << modelIndex << " with learned weights" << std::endl;
-            INFTRWS solver(learningdataset.getModel(modelIndex),infPara);
-            solver.infer();
-            std::vector< LabelType > sol2;
-            OPENGM_TEST(solver.arg(sol2) == opengm::NORMAL);
-            for (size_t j = 0; j < sol2.size(); j++)
-            {
-               std::cout << "sol2["<<j<<"]:" << sol2[j] << "   GTSolutionVector["<<modelIndex<<"]["<<j<<"]:" << GTSolutionVector[modelIndex][j] << std::endl; 
-               OPENGM_TEST(sol2[j] == GTSolutionVector[modelIndex][j]);
-            }
-            std::cout << "all " << sol2.size() << " solutions are correct" << std::endl;
-        }
-
-    }
-*/
-}
-

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git


