[opengm] 349/386: Revert "Merge pull request #423 from opengm/learning-experimental"

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:38:31 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit 00163b3dcd342686d57ce1a7f2b11b0b79adb9ae
Author: Sven Peter <sven.peter at iwr.uni-heidelberg.de>
Date:   Wed Mar 30 10:57:36 2016 +0200

    Revert "Merge pull request #423 from opengm/learning-experimental"
    
    This reverts commit f8f448c0b5009882b6dbf8ab34df41711b72b0e9, reversing
    changes made to e3408d084b219dce69a515117c3c1253e3cb5b7d.
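
A merge revert of this shape is produced with git revert's -m flag, which
names the mainline parent whose state should be restored. Assuming
e3408d084b was the merge's first parent (a sketch; the exact invocation is
not recorded in this email), the command would have been:

    git revert -m 1 f8f448c0b5009882b6dbf8ab34df41711b72b0e9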
---
 .gitignore                                         |   7 -
 CMakeLists.txt                                     |  16 +-
 README.md                                          |   4 +-
 fubar/brown_horse.py                               | 127 ----
 fubar/brown_horse2.py                              | 136 ----
 fubar/brown_horse_sp.py                            | 206 ------
 fubar/example1.py                                  |  35 -
 fubar/make_grid_potts_dset.py                      |   1 -
 fubar/make_mc_dset.py                              |  19 -
 fubar/max_likelihood_example.py                    |  40 --
 fubar/noisy_squares_3_labels.py                    | 121 ----
 fubar/pascal_voc/convertPascalVOC.py               |  94 ---
 fubar/pascal_voc/convertPascalVOCNew.py            | 108 ---
 fubar/pascal_voc/learnPascalVOC.py                 |  35 -
 fubar/python_stuff.py                              | 102 ---
 fubar/real_example.py                              |  96 ---
 fubar/real_example_2.py                            | 150 -----
 fubar/simple_sp.py                                 | 165 -----
 fubar/toy_dataset.py                               | 125 ----
 fubar/whorse.py                                    |  79 ---
 .../opengm/datastructures/marray/marray_hdf5.hxx   |  25 -
 .../opengm/functions/function_properties_base.hxx  |  11 +-
 include/opengm/functions/l_potts.hxx               | 202 ++++++
 include/opengm/functions/learnable/lpotts.hxx      | 254 -------
 include/opengm/functions/learnable/lunary.hxx      | 478 --------------
 .../learnable/lweightedsum_of_functions.hxx        | 264 --------
 include/opengm/functions/potts.hxx                 |   4 +-
 include/opengm/functions/readme.txt                |   3 -
 include/opengm/functions/unary_loss_function.hxx   | 151 -----
 include/opengm/functions/view_convert_function.hxx |  36 +-
 include/opengm/graphicalmodel/graphicalmodel.hxx   |  44 +-
 .../graphicalmodel/graphicalmodel_factor.hxx       |  27 -
 .../graphicalmodel_function_wrapper.hxx            |  79 ---
 include/opengm/graphicalmodel/parameters.hxx       |  44 ++
 include/opengm/graphicalmodel/weights.hxx          | 282 --------
 include/opengm/inference/alphabetaswap.hxx         |  33 +-
 include/opengm/inference/alphaexpansion.hxx        |  85 +--
 include/opengm/inference/alphaexpansionfusion.hxx  |  75 +--
 include/opengm/inference/astar.hxx                 | 129 ++--
 .../auxiliary/fusion_move/fusion_mover.hxx         |  27 -
 .../auxiliary/lp_solver/lp_solver_interface.hxx    |  12 +-
 include/opengm/inference/bruteforce.hxx            |  21 +-
 include/opengm/inference/combilp.hxx               |  11 -
 .../dualdecomposition/dualdecomposition_bundle.hxx |  28 -
 .../dualdecomposition_subgradient.hxx              |  37 +-
 include/opengm/inference/dynamicprogramming.hxx    |  20 -
 include/opengm/inference/external/ad3.hxx          |  23 -
 include/opengm/inference/external/daoopt.hxx       |  26 -
 include/opengm/inference/external/fastPD.hxx       |  16 -
 include/opengm/inference/external/mrflib.hxx       |  15 -
 include/opengm/inference/external/qpbo.hxx         |  35 +-
 include/opengm/inference/external/trws.hxx         |  38 +-
 include/opengm/inference/fusion_based_inf.hxx      |  23 +-
 include/opengm/inference/graphcut.hxx              |  29 +-
 include/opengm/inference/greedygremlin.hxx         |  19 +-
 include/opengm/inference/hqpbo.hxx                 |  23 +-
 include/opengm/inference/icm.hxx                   |  63 +-
 include/opengm/inference/infandflip.hxx            |  23 -
 include/opengm/inference/inference.hxx             |   9 -
 include/opengm/inference/lazyflipper.hxx           | 161 ++---
 include/opengm/inference/loc.hxx                   |  29 -
 include/opengm/inference/lp_inference_base.hxx     |   1 +
 include/opengm/inference/lpcplex.hxx               |  99 +--
 include/opengm/inference/lpcplex2.hxx              |  10 -
 include/opengm/inference/lpgurobi.hxx              |  80 +--
 include/opengm/inference/lpgurobi2.hxx             |  10 -
 include/opengm/inference/lsatr.hxx                 |  26 +-
 .../inference/messagepassing/messagepassing.hxx    |  30 -
 .../inference/messagepassing/messagepassing_bp.hxx |  11 -
 .../messagepassing/messagepassing_trbp.hxx         |   9 -
 include/opengm/inference/movemaker.hxx             |   8 -
 include/opengm/inference/mqpbo.hxx                 |  31 +-
 include/opengm/inference/multicut.hxx              |  39 +-
 include/opengm/inference/partition-move.hxx        |  15 +-
 include/opengm/inference/qpbo.hxx                  |  17 +-
 include/opengm/inference/reducedinference.hxx      |  26 -
 include/opengm/inference/sat.hxx                   |  18 +-
 include/opengm/inference/self_fusion.hxx           |  47 +-
 include/opengm/learning/bundle-optimizer.hxx       | 326 ---------
 include/opengm/learning/dataset/dataset.hxx        | 234 -------
 include/opengm/learning/dataset/dataset_io.hxx     | 134 ----
 .../opengm/learning/dataset/editabledataset.hxx    | 146 -----
 include/opengm/learning/dataset/testdatasets.hxx   | 375 -----------
 include/opengm/learning/gradient-accumulator.hxx   | 175 -----
 include/opengm/learning/gridsearch-learning.hxx    | 126 ----
 include/opengm/learning/loss/flexibleloss.hxx      | 305 ---------
 .../learning/loss/generalized-hammingloss.hxx      | 152 -----
 include/opengm/learning/loss/hammingloss.hxx       |  81 ---
 include/opengm/learning/loss/noloss.hxx            |  73 ---
 .../learning/maximum-likelihood-learning.hxx       | 310 ---------
 .../learning/maximum_likelihood_learning.hxx       | 238 -------
 include/opengm/learning/rws.hxx                    | 286 --------
 include/opengm/learning/solver/BundleCollector.h   |  49 --
 include/opengm/learning/solver/CplexBackend.h      | 433 ------------
 include/opengm/learning/solver/GurobiBackend.h     | 439 -------------
 include/opengm/learning/solver/LinearConstraint.h  |  94 ---
 include/opengm/learning/solver/LinearConstraints.h | 119 ----
 include/opengm/learning/solver/LinearObjective.h   |  24 -
 .../opengm/learning/solver/LinearSolverBackend.h   |  84 ---
 .../opengm/learning/solver/QuadraticObjective.h    | 181 -----
 .../learning/solver/QuadraticSolverBackend.h       |  28 -
 .../learning/solver/QuadraticSolverFactory.h       |  33 -
 .../learning/solver/QuadraticSolverParameters.h    |  15 -
 include/opengm/learning/solver/Relation.h          |  20 -
 include/opengm/learning/solver/Sense.h             |  20 -
 include/opengm/learning/solver/Solution.h          |  49 --
 include/opengm/learning/solver/VariableType.h      |  18 -
 include/opengm/learning/struct-max-margin.hxx      | 219 -------
 include/opengm/learning/structured_perceptron.hxx  | 208 ------
 include/opengm/learning/subgradient_ssvm.hxx       | 353 ----------
 include/opengm/learning/weight_averaging.hxx       |  68 --
 include/opengm/python/numpyview.hxx                |  22 +-
 include/opengm/python/opengmpython.hxx             |  73 +--
 include/opengm/utilities/metaprogramming.hxx       |  83 +--
 include/opengm/utilities/shape_accessor.hxx        |  78 +--
 src/examples/unsorted-examples/inference_types.cxx |   9 +-
 .../commandline/double/opengm_min_sum.cxx          |   9 +-
 src/interfaces/python/opengm/CMakeLists.txt        |   1 -
 src/interfaces/python/opengm/__init__.py           |   2 +-
 .../opengm/_inference_interface_generator.py       |  59 --
 src/interfaces/python/opengm/functionhelper.py     |   4 +-
 .../python/opengm/inference/inf_def_visitor.hxx    |   1 -
 .../python/opengm/inference/pyFusionMoves.cxx      |   4 +-
 .../python/opengm/learning/CMakeLists.txt          | 151 -----
 src/interfaces/python/opengm/learning/__init__.py  | 726 ---------------------
 src/interfaces/python/opengm/learning/helper.hxx   | 330 ----------
 src/interfaces/python/opengm/learning/learning.cxx |  91 ---
 .../python/opengm/learning/pyDataset.cxx           | 104 ---
 .../python/opengm/learning/pyGridSearchLearner.cxx |  64 --
 .../python/opengm/learning/pyLFunctionGen.cxx      | 309 ---------
 src/interfaces/python/opengm/learning/pyLoss.cxx   |  81 ---
 .../opengm/learning/pyMaxLikelihoodLearner.cxx     |  81 ---
 src/interfaces/python/opengm/learning/pyRws.cxx    |  72 --
 .../opengm/learning/pyStructMaxMarginLearner.cxx   |  64 --
 .../python/opengm/learning/pyStructPerceptron.cxx  |  75 ---
 .../python/opengm/learning/pySubgradientSSVM.cxx   |  80 ---
 .../python/opengm/learning/pyWeights.cxx           |  46 --
 .../python/opengm/opengmcore/__init__.py           |  19 +-
 .../python/opengm/opengmcore/function_injector.py  |  11 +-
 .../python/opengm/opengmcore/opengmcore.cpp        |  64 +-
 .../python/opengm/opengmcore/pyFunctionGen.cxx     |  57 --
 .../python/opengm/opengmcore/pyFunctionTypes.cxx   | 241 +------
 src/interfaces/python/opengm/opengmcore/pyGm.cxx   |  25 +-
 src/interfaces/python/test.py                      | 115 +---
 src/tutorials/c++/basics/doMinSumInference.cxx     |   2 +-
 src/tutorials/c++/basics/doSumProdInference.cxx    |   2 +-
 src/unittest/CMakeLists.txt                        |   6 +-
 src/unittest/inference/test_graphcut.cxx           |   6 +-
 src/unittest/inference/test_lazyflipper.cxx        |   2 +-
 src/unittest/inference/test_messagepassing.cxx     |  20 +-
 src/unittest/learning/CMakeLists.txt               |  51 --
 src/unittest/learning/test_dataset.cxx             | 150 -----
 src/unittest/learning/test_dataset_io.cxx          | 101 ---
 .../learning/test_generalized_hammingloss.cxx      |  65 --
 src/unittest/learning/test_gridsearch_learner.cxx  |  90 ---
 src/unittest/learning/test_learning.cxx            | 233 -------
 .../learning/test_maximum_likelihood_learner.cxx   | 126 ----
 src/unittest/learning/test_subgradient_ssvm.cxx    | 238 -------
 src/unittest/test_gm_learning_functions.cxx        |  23 +-
 src/unittest/test_learnable_functions.cxx          |  79 ---
 src/unittest/test_linear_constraint.cxx            |   1 -
 161 files changed, 703 insertions(+), 14115 deletions(-)

diff --git a/.gitignore b/.gitignore
index 29d22cb..44a7094 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,10 +7,3 @@
 
 *~
 *.swp
-
-pascal_voc_val_gm_*.h5
-pascal_voc_train_gm*.h5
-pascal_voc_train_info.h5
-pascal_voc_val_info.h5
-data_train.pickle
-data_val.pickle
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 32bfce8..92e894c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -95,10 +95,6 @@ OPTION(BUILD_MATLAB_WRAPPER "Build matlab wrapper" OFF)
 ###Grante needs C++11. Since we have not tested OpenGM under this standard yet, using Grante is really experimental!!!
 ###OPTION(WITH_GRANTE "Include wrapper for grante" OFF)
 
-if(CI)
-    add_definitions(-DCI)
-endif()
-
 #--------------------------------------------------------------
 # Cplex
 #--------------------------------------------------------------
@@ -114,9 +110,6 @@ else()
 endif()
 
 
-
-
-
 #--------------------------------------------------------------
 # AD3
 #--------------------------------------------------------------
@@ -385,8 +378,6 @@ if(WITH_OPENMP)
   #SET(OPENMP_INCLUDE_DIR "" CACHE STRING "OpenMP include dir")
   #include_directories(${OPENMP_INCLUDE_DIR})
   add_definitions(-DWITH_OPENMP)
-  set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
-  set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
 else()
    message(STATUS "build without openMP -> multithreaded options disabled")
 endif(WITH_OPENMP)
@@ -522,14 +513,9 @@ endif(WITH_BLOSSOM5)
 
 
 #--------------------------------------------------------------
-# thread libs
+# thread lib
 #--------------------------------------------------------------
 find_package(Threads)
-find_package(OpenMP)
-
-if(OPENMP_FOUND)
-  add_definitions(-DWITH_OPENMP)
-endif()
 
 #--------------------------------------------------------------
 # rt lib
diff --git a/README.md b/README.md
index 13d187a..42b005c 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,7 @@
 OpenGM 2
 ========
 
-[![Build Status](https://travis-ci.org/opengm/opengm.png?branch=master)](https://travis-ci.org/opengm/opengm) (master)
-
-[![Build Status](https://travis-ci.org/opengm/opengm.png?branch=learning-experimental)](https://travis-ci.org/opengm/opengm) (learning-experimental)
+[![Build Status](https://travis-ci.org/opengm/opengm.png?branch=master)](https://travis-ci.org/opengm/opengm)
 
 
 -----------------------------------------------------------------------------------------------
diff --git a/fubar/brown_horse.py b/fubar/brown_horse.py
deleted file mode 100644
index da46a97..0000000
--- a/fubar/brown_horse.py
+++ /dev/null
@@ -1,127 +0,0 @@
-import numpy
-import opengm
-from opengm import learning
-import vigra
-from progressbar import *
-import glob
-import os
-from functools import partial
-from opengm.learning import secondOrderImageDataset, getPbar
-
-
-
-def posiFeatures(img):
-    shape = img.shape[0:2]
-    x = numpy.linspace(0, 1, shape[0])
-    y = numpy.linspace(0, 1, shape[1])
-    xv, yv = numpy.meshgrid(y, x)
-    xv -=0.5
-    yv -=0.5
-
-    rad = numpy.sqrt(xv**2 + yv**2)[:,:,None]
-    erad = numpy.exp(1.0 - rad)
-    xva = (xv**2)[:,:,None]
-    yva = (yv**2)[:,:,None]
-
-    res = numpy.concatenate([erad, rad,xva,yva,xv[:,:,None],yv[:,:,None]],axis=2)
-    assert res.shape[0:2] == img.shape[0:2]
-    return res
-
-#i = numpy.ones([7, 5])
-#
-#print posiFeatures(i).shape
-#
-# where is the dataset stored
-dsetRoot = '/home/tbeier/datasets/weizmann_horse_db/'
-imgPath = dsetRoot + 'brown_horse/'
-gtBasePath = dsetRoot + 'figure_ground/'
-
-imgFiles = glob.glob(imgPath+'*.jpg')
-takeNth = 3
-imgs = []
-gts = []
-pbar = getPbar(len(imgFiles), 'Load Image')
-pbar.start()
-for i,path in enumerate(imgFiles):
-    gtPath =  gtBasePath + os.path.basename(path)
-    rgbImg  = vigra.impex.readImage(path)
-    gtImg  = vigra.impex.readImage(gtPath).astype('uint32')[::takeNth,::takeNth]
-    gtImg[gtImg<125] = 0
-    gtImg[gtImg>=125] = 1
-    rgbImg = vigra.resize(rgbImg, [gtImg.shape[0],gtImg.shape[1]])
-    imgs.append(rgbImg)
-    gts.append(gtImg)
-    pbar.update(i)
-pbar.finish()
-
-def getSelf(img):
-    return img
-
-
-def labHessianOfGaussian(img, sigma):
-    l = vigra.colors.transform_RGB2Lab(img)[:,:,0]
-    l = vigra.taggedView(l,'xy')
-    return vigra.filters.hessianOfGaussianEigenvalues(l, sigma)
-
-def labStructTensorEv(img, sigma):
-    l = vigra.colors.transform_RGB2Lab(img)[:,:,0]
-    l = vigra.taggedView(l,'xy')
-    return vigra.filters.structureTensorEigenvalues(l, sigma, 2*sigma)
-
-fUnary = [
-    posiFeatures,
-    getSelf,
-    vigra.colors.transform_RGB2XYZ,
-    vigra.colors.transform_RGB2Lab,
-    vigra.colors.transform_RGB2Luv,
-    partial(labHessianOfGaussian, sigma=1.0),
-    partial(labHessianOfGaussian, sigma=2.0),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=1.0),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=2.0),
-]
-
-fBinary = [
-    posiFeatures,
-    vigra.colors.transform_RGB2XYZ,
-    vigra.colors.transform_RGB2Lab,
-    vigra.colors.transform_RGB2Luv,
-    partial(labHessianOfGaussian, sigma=1.0),
-    partial(labHessianOfGaussian, sigma=2.0),
-    partial(labStructTensorEv, sigma=1.0),
-    partial(labStructTensorEv, sigma=2.0),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=1.0),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=2.0),
-]
-
-
-dataset,test_set = secondOrderImageDataset(imgs=imgs, gts=gts, numberOfLabels=2, 
-                                          fUnary=fUnary, fBinary=fBinary, 
-                                          addConstFeature=False)
-
-
-
-
-
-learner =  learning.subgradientSSVM(dataset, learningRate=0.05, C=100, 
-                                    learningMode='batch',maxIterations=1000)
-
-
-#learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
-
-
-learner.learn(infCls=opengm.inference.LazyFlipper, 
-              parameter=opengm.InfParam(maxSubgraphSize=3))
-
-
-
-# predict on test set
-for (rgbImg, gtImg, gm) in test_set :
-    # infer for test image
-    inf = opengm.inference.QpboExternal(gm)
-    inf.infer()
-    arg = inf.arg()
-    arg = arg.reshape(numpy.squeeze(gtImg).shape)
-
-    vigra.segShow(rgbImg, arg+2)
-    vigra.show()
-
diff --git a/fubar/brown_horse2.py b/fubar/brown_horse2.py
deleted file mode 100644
index 89f3ddb..0000000
--- a/fubar/brown_horse2.py
+++ /dev/null
@@ -1,136 +0,0 @@
-import numpy
-import opengm
-from opengm import learning
-import vigra
-from progressbar import *
-import glob
-import os
-from functools import partial
-from opengm.learning import secondOrderImageDataset, getPbar
-
-
-
-def posiFeatures(img):
-    shape = img.shape[0:2]
-    x = numpy.linspace(0, 1, shape[0])
-    y = numpy.linspace(0, 1, shape[1])
-    xv, yv = numpy.meshgrid(y, x)
-    xv -=0.5
-    yv -=0.5
-
-    rad = numpy.sqrt(xv**2 + yv**2)[:,:,None]
-    erad = numpy.exp(1.0 - rad)
-    xva = (xv**2)[:,:,None]
-    yva = (yv**2)[:,:,None]
-
-    res = numpy.concatenate([erad, rad,xva,yva,xv[:,:,None],yv[:,:,None]],axis=2)
-    assert res.shape[0:2] == img.shape[0:2]
-    return res
-
-#i = numpy.ones([7, 5])
-#
-#print posiFeatures(i).shape
-#
-# where is the dataset stored
-dsetRoot = '/home/tbeier/datasets/weizmann_horse_db/'
-imgPath = dsetRoot + 'brown_horse/'
-gtBasePath = dsetRoot + 'figure_ground/'
-
-imgFiles = glob.glob(imgPath+'*.jpg')
-takeNth = 2
-imgs = []
-gts = []
-pbar = getPbar(len(imgFiles), 'Load Image')
-pbar.start()
-for i,path in enumerate(imgFiles):
-    gtPath =  gtBasePath + os.path.basename(path)
-    rgbImg  = vigra.impex.readImage(path)
-    gtImg  = vigra.impex.readImage(gtPath).astype('uint32')[::takeNth,::takeNth]
-    gtImg[gtImg<125] = 0
-    gtImg[gtImg>=125] = 1
-    cEdgeImg = vigra.analysis.regionImageToCrackEdgeImage(gtImg+1)
-    cEdgeImg[cEdgeImg>0] = 1
-    cEdgeImg = vigra.filters.discErosion(cEdgeImg.astype('uint8'),2)
-    gtImg = cEdgeImg.astype(numpy.uint64)
-
-    if i ==0:
-        vigra.imshow(cEdgeImg)
-        vigra.show()
-    rgbImg = vigra.resize(rgbImg, [gtImg.shape[0],gtImg.shape[1]])
-    imgs.append(rgbImg)
-    gts.append(gtImg)
-    pbar.update(i)
-pbar.finish()
-
-def getSelf(img):
-    return img
-
-
-def labHessianOfGaussian(img, sigma):
-    l = vigra.colors.transform_RGB2Lab(img)[:,:,0]
-    l = vigra.taggedView(l,'xy')
-    return vigra.filters.hessianOfGaussianEigenvalues(l, sigma)
-
-def labStructTensorEv(img, sigma):
-    l = vigra.colors.transform_RGB2Lab(img)[:,:,0]
-    l = vigra.taggedView(l,'xy')
-    return vigra.filters.structureTensorEigenvalues(l, sigma, 2*sigma)
-
-fUnary = [
-    #posiFeatures,
-    #getSelf,
-    vigra.colors.transform_RGB2XYZ,
-    vigra.colors.transform_RGB2Lab,
-    vigra.colors.transform_RGB2Luv,
-    partial(labHessianOfGaussian, sigma=1.0),
-    partial(labHessianOfGaussian, sigma=2.0),
-    partial(labStructTensorEv, sigma=1.0),
-    partial(labStructTensorEv, sigma=2.0),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=1.0),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=2.0),
-]
-
-fBinary = [
-    #posiFeatures,
-    vigra.colors.transform_RGB2XYZ,
-    vigra.colors.transform_RGB2Lab,
-    vigra.colors.transform_RGB2Luv,
-    partial(labHessianOfGaussian, sigma=1.0),
-    partial(labHessianOfGaussian, sigma=2.0),
-    partial(labStructTensorEv, sigma=1.0),
-    partial(labStructTensorEv, sigma=2.0),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=1.0),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=2.0),
-]
-
-
-dataset,test_set = secondOrderImageDataset(imgs=imgs, gts=gts, numberOfLabels=2, 
-                                          fUnary=fUnary, fBinary=fBinary, 
-                                          addConstFeature=False)
-
-
-
-
-learner =  learning.subgradientSSVM(dataset, learningRate=0.1, C=1000, 
-                                    learningMode='batch',maxIterations=1000)
-
-#learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
-
-
-learner.learn(infCls=opengm.inference.QpboExternal, 
-              parameter=opengm.InfParam())
-
-
-
-# predict on test set
-for (rgbImg, gtImg, gm) in test_set :
-    # infer for test image
-    inf = opengm.inference.QpboExternal(gm)
-    inf.infer()
-    arg = inf.arg()
-    arg = arg.reshape( numpy.squeeze(gtImg).shape)
-
-    vigra.imshow(arg)
-    #vigra.segShow(rgbImg, arg+2)
-    vigra.show()
-
diff --git a/fubar/brown_horse_sp.py b/fubar/brown_horse_sp.py
deleted file mode 100644
index 1d1579b..0000000
--- a/fubar/brown_horse_sp.py
+++ /dev/null
@@ -1,206 +0,0 @@
-import numpy
-import opengm
-from opengm import learning
-import vigra
-from progressbar import *
-import glob
-import os
-from functools import partial
-from opengm.learning import secondOrderImageDataset, getPbar,superpixelDataset
-
-
-
-
-
-#i = numpy.ones([7, 5])
-#
-#print posiFeatures(i).shape
-#
-# where is the dataset stored
-dsetRoot = '/home/tbeier/datasets/weizmann_horse_db/'
-imgPath = dsetRoot + 'rgb/'
-gtBasePath = dsetRoot + 'figure_ground/'
-
-imgFiles = glob.glob(imgPath+'*.jpg')
-takeNth = 2
-
-imgs = []
-sps = []
-gts = []
-
-
-pbar = getPbar(len(imgFiles), 'Load Image')
-pbar.start()
-for i,path in enumerate(imgFiles):
-
-    if i>20 :
-        break
-    gtPath =  gtBasePath + os.path.basename(path)
-    rgbImg  = vigra.impex.readImage(path)
-    gtImg  = vigra.impex.readImage(gtPath).astype('uint32')[::takeNth,::takeNth]
-    gtImg[gtImg<125] = 0
-    gtImg[gtImg>=125] = 1
-    rgbImg = vigra.resize(rgbImg, [gtImg.shape[0],gtImg.shape[1]])
-    
-
-    #vigra.imshow(gtImg.astype('float32'))
-    #vigra.show()
-
-    labImg = vigra.colors.transform_RGB2Lab(rgbImg.astype('float32'))
-    sp,nSeg  = vigra.analysis.slicSuperpixels(labImg, intensityScaling=20.0, seedDistance=5)
-    sp = vigra.analysis.labelImage(sp)-1
-
-    #vigra.segShow(rgbImg, sp)
-    #vigra.show()
-    gg  = vigra.graphs.gridGraph(rgbImg.shape[0:2])
-    rag = vigra.graphs.regionAdjacencyGraph(gg,sp)
-
-    gt,qtq = rag.projectBaseGraphGt(gtImg)
-
-    #rag.show(rgbImg, gt)
-    #vigra.show()
-
-
-    imgs.append(rgbImg)
-    gts.append(gt)
-    sps.append(sp)
-
-    pbar.update(i)
-
-
-pbar.finish()
-
-def posiFeatures(img):
-    shape = img.shape[0:2]
-    x = numpy.linspace(0, 1, shape[0])
-    y = numpy.linspace(0, 1, shape[1])
-    xv, yv = numpy.meshgrid(y, x)
-    xv -=0.5
-    yv -=0.5
-
-    rad = numpy.sqrt(xv**2 + yv**2)[:,:,None]
-    erad = numpy.exp(1.0 - rad)
-    xva = (xv**2)[:,:,None]
-    yva = (yv**2)[:,:,None]
-
-    res = numpy.concatenate([erad, rad,xva,yva,xv[:,:,None],yv[:,:,None]],axis=2)
-    assert res.shape[0:2] == img.shape[0:2]
-    return res
-
-def getSelf(img):
-    f=img.copy()
-    f-=f.min()
-    f/=f.max()
-    return f
-
-
-def labHessianOfGaussian(img, sigma):
-    l = vigra.colors.transform_RGB2Lab(img)[:,:,0]
-    l = vigra.taggedView(l,'xy')
-    f =  vigra.filters.hessianOfGaussianEigenvalues(l, sigma)
-    f-=f.min()
-    f/=f.max()
-    return f
-
-def labStructTensorEv(img, sigma):
-    l = vigra.colors.transform_RGB2Lab(img)[:,:,0]
-    l = vigra.taggedView(l,'xy')
-    f = vigra.filters.structureTensorEigenvalues(l, sigma, 2*sigma)
-    f-=f.min()
-    f/=f.max()
-    return f
-
-def rgbHist(img):
-    minVals=(0.0,0.0,0.0)
-    maxVals=(255.0, 255.0, 255.0)
-    img = vigra.taggedView(img,'xyc')
-    hist = vigra.histogram.gaussianHistogram(img,minVals,maxVals,bins=30,sigma=3.0, sigmaBin=1.0)
-    f = vigra.taggedView(hist,'xyc')
-    f-=f.min()
-    f/=f.max()
-    return f
-
-
-def labHist(img):
-    minVals=(0.0,-86.1814   ,-107.862)
-    maxVals=(100.0, 98.2353, 94.48)
-    imgl= vigra.colors.transform_RGB2Lab(img)
-    hist = vigra.histogram.gaussianHistogram(imgl,minVals,maxVals,bins=30,sigma=3.0, sigmaBin=1.0)
-    f = vigra.taggedView(hist,'xyc')
-    f-=f.min()
-    f/=f.max()
-    return f
-
-def gmag(img, sigma):
-    f =  vigra.filters.gaussianGradientMagnitude(img, sigma)
-    f-=f.min()
-    f/=f.max()
-    return f
-
-fUnary = [
-    posiFeatures,
-    labHist,
-    rgbHist,
-    getSelf,
-    #vigra.colors.transform_RGB2XYZ,
-    #vigra.colors.transform_RGB2Lab,
-    #vigra.colors.transform_RGB2Luv,
-    #partial(labHessianOfGaussian, sigma=1.0),   
-    #partial(labHessianOfGaussian, sigma=2.0),
-    #partial(vigra.filters.gaussianGradientMagnitude, sigma=1.0),
-    #partial(vigra.filters.gaussianGradientMagnitude, sigma=2.0),
-]#
-
-fBinary = [
-    #posiFeatures,
-    ##rgbHist,
-    #partial(labHessianOfGaussian, sigma=1.0),
-    #partial(labHessianOfGaussian, sigma=2.0),
-    #partial(labStructTensorEv, sigma=1.0),
-    #partial(labStructTensorEv, sigma=2.0),
-    partial(gmag, sigma=1.0),
-    partial(gmag, sigma=2.0),
-]
-
-
-dataset,test_set = superpixelDataset(imgs=imgs,sps=sps, gts=gts, numberOfLabels=2, 
-                                          fUnary=fUnary, fBinary=fBinary, 
-                                          addConstFeature=True)
-
-
-
-
-
-learner =  learning.subgradientSSVM(dataset, learningRate=0.1, C=0.1, 
-                                    learningMode='batch',maxIterations=2000, averaging=-1)
-
-
-#learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
-
-
-learner.learn(infCls=opengm.inference.QpboExternal, 
-              parameter=opengm.InfParam())
-
-
-w = dataset.getWeights()
-
-for wi in range(len(w)):
-    print "wi ",w[wi]
-
-
-# predict on test set
-for (rgbImg, sp, gm) in test_set :
-    # infer for test image
-    inf = opengm.inference.QpboExternal(gm)
-    inf.infer()
-    arg = inf.arg()+1
-
-
-    gg  = vigra.graphs.gridGraph(rgbImg.shape[0:2])
-    rag = vigra.graphs.regionAdjacencyGraph(gg,sp)
-
-    seg = rag.projectLabelsToBaseGraph(arg.astype('uint32'))
-
-    vigra.segShow(rgbImg, seg+2)
-    vigra.show()
-
diff --git a/fubar/example1.py b/fubar/example1.py
deleted file mode 100644
index 9f5f845..0000000
--- a/fubar/example1.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import opengm
-import opengm.learning as learning
-from opengm import numpy
-
-
-
-# weight vector
-nWeights = 100
-weightVals = numpy.ones(nWeights)*0.5
-weights = opengm.learning.Weights(weightVals)
-
-
-
-dataset =learning.createDataset(loss='h')
-
-print "type of dataset", dataset
-
-
-# for grid search learner
-lowerBounds = numpy.zeros(nWeights)
-upperBounds = numpy.ones(nWeights)
-nTestPoints  =numpy.ones(nWeights).astype('uint64')*10
-
-
-learner = learning.gridSearchLearner(dataset=dataset,lowerBounds=lowerBounds, upperBounds=upperBounds,nTestPoints=nTestPoints)
-
-learner.learn(infCls=opengm.inference.BeliefPropagation, 
-              parameter=opengm.InfParam(damping=0.5))
-
-# for struct max margin learner
-smm_learnerParam = learning.StructMaxMargin_Bundle_HammingLossParameter(1.0, 0.01, 0)
-smm_learner = learning.StructMaxMargin_Bundle_HammingLoss(dataset, smm_learnerParam)
-smm_learner.learn(infCls=opengm.inference.Icm)
-smm_learner2 = learning.structMaxMarginLearner(dataset, 1.0, 0.001, 0)
-smm_learner2.learn(infCls=opengm.inference.BeliefPropagation, parameter=opengm.InfParam(damping=0.5))
diff --git a/fubar/make_grid_potts_dset.py b/fubar/make_grid_potts_dset.py
deleted file mode 100644
index 8b13789..0000000
--- a/fubar/make_grid_potts_dset.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/fubar/make_mc_dset.py b/fubar/make_mc_dset.py
deleted file mode 100644
index 9fc4cc8..0000000
--- a/fubar/make_mc_dset.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import numpy
-import opengm
-from opengm import learning
-import vigra
-from progressbar import *
-from functools import partial
-
-
-
-
-def make_mmwc_dataset(nSemanticClasses, modelSizes, edges, nodeFeatures, edgeFeatures, allowCutsWithin):
-
-    assert len(modelSizes) == len(edges)
-    assert len(edges) == len(nodeFeatures)
-    assert len(edges) == len(edgeFeatures)
-
-    for modelSize, edge in zip(modelSizes, edges):
-        pass  # stub: dataset construction was never finished
-
diff --git a/fubar/max_likelihood_example.py b/fubar/max_likelihood_example.py
deleted file mode 100644
index 059cc4e..0000000
--- a/fubar/max_likelihood_example.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import opengm
-import opengm.learning as learning
-from opengm import numpy
-
-# create a simple model with exactly one variable with two labels
-numWeights = 4
-nLabels = 2
-nVars = 1
-
-# set weight ids and features for all labels
-weightIds = numpy.array([[0, 1],       [2,3]])
-features = numpy.array( [[0.5, -0.25], [-0.5, -1.25]])
-
-# create a dataset with the 4 weights and fetch them
-dataset = learning.createDataset(numWeights)
-weights = dataset.getWeights()
-
-# set up graphical model
-gm = opengm.gm(numpy.ones(nVars)*nLabels)
-fid = gm.addFunction(learning.lUnaryFunction(weights, 2, features, weightIds))
-gm.addFactor(fid, [0])
-
-# add graphical model to dataset with ground truth
-ground_truth = numpy.array([0]).astype(opengm.label_type)
-dataset.pushBackInstance(gm, ground_truth)
-
-# set up learner and run
-#learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
-#learner =  learning.subgradientSSVM(dataset, learningRate=1.0, C=100, learningMode='batch')
-#learner.learn(infCls=opengm.inference.TrwsExternal,  parameter=opengm.InfParam())
-
-
-learner = learning.maxLikelihoodLearner(
-    dataset,
-    maximumNumberOfIterations=1500,
-    gradientStepSize=0.9,
-    weightStoppingCriteria=0.001,
-    gradientStoppingCriteria=1e-11,
-    infoFlag=True,
-    infoEveryStep=False,
-    weightRegularizer=1.0,
-    beliefPropagationMaximumNumberOfIterations=20,
-    beliefPropagationConvergenceBound=1e-13,
-    beliefPropagationDamping=0.5,
-    beliefPropagationTemperature=1,
-    beliefPropagationIsAcyclic=opengm.Tribool(True))
-learner.learn()
-
-for w in range(numWeights):
-    print weights[w]
diff --git a/fubar/noisy_squares_3_labels.py b/fubar/noisy_squares_3_labels.py
deleted file mode 100644
index 2d9bf7a..0000000
--- a/fubar/noisy_squares_3_labels.py
+++ /dev/null
@@ -1,121 +0,0 @@
-import numpy
-import opengm
-from opengm import learning
-import vigra
-from progressbar import *
-import glob
-import os
-from functools import partial
-from opengm.learning import secondOrderImageDataset, getPbar
-
-numpy.random.seed(42)
-
-nImages = 8 
-shape = [15, 15]
-noise = 2.0
-imgs = []
-gts = []
-
-
-for i in range(nImages):
-
-    gtImg = numpy.zeros(shape)
-    gtImg[0:shape[0]/2,:] = 1
-
-    gtImg[shape[0]/4: 3*shape[0]/4, shape[0]/4: 3*shape[0]/4]  = 2
-
-    ra = numpy.random.randint(180)
-    #print ra 
-
-    gtImg = vigra.sampling.rotateImageDegree(gtImg.astype(numpy.float32),int(ra),splineOrder=0)
-
-    if i<1 :
-        vigra.imshow(gtImg)
-        vigra.show()
-
-    img = gtImg + numpy.random.random(shape)*float(noise)
-    if i<1 :
-        vigra.imshow(img)
-        vigra.show()
-
-    imgs.append(img.astype('float32'))
-    gts.append(gtImg)
-
-
-
-
-
-
-
-def getSelf(img):
-    return img
-
-
-def getSpecial(img, sigma):
-    simg = vigra.filters.gaussianSmoothing(img, sigma=sigma)
-
-    img0  = simg**2
-    img1  = (simg - 1.0)**2
-    img2  = (simg - 2.0)**2
-
-    img0=img0[:,:,None]
-    img1=img1[:,:,None]
-    img2=img2[:,:,None]
-
-
-    return numpy.concatenate([img0,img1,img2],axis=2)
-
-
-fUnary = [
-    partial(getSpecial, sigma=0.5),
-    partial(getSpecial, sigma=1.0)
-]
-
-fBinary = [
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=0.5),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=1.0),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=1.5),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=2.0),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=3.0),
-]
-
-
-dataset,test_set = secondOrderImageDataset(imgs=imgs, gts=gts, numberOfLabels=3, 
-                                          fUnary=fUnary, fBinary=fBinary, 
-                                          addConstFeature=True)
-
-
-
-
-
-
-
-learningModi = ['normal','reducedinference','selfFusion','reducedinferenceSelfFusion']
-lm = 0
-
-
-infCls = opengm.inference.TrwsExternal
-param = opengm.InfParam()
-
-
-learner =  learning.subgradientSSVM(dataset, learningRate=1.0, C=0.9, learningMode='batch',maxIterations=5, averaging=-1)
-learner.learn(infCls=infCls,parameter=param,connectedComponents=False,infMode='n')
-
-learner =  learning.rws(dataset, learningRate=1.0, C=1.0,maxIterations=5000, p=100, sigma=1.3)
-learner.learn(infCls=infCls,parameter=param,connectedComponents=False,infMode='n')
-
-
-# predict on test set
-for (rgbImg, gtImg, gm) in test_set :
-    # infer for test image
-    inf = opengm.inference.TrwsExternal(gm)
-    inf.infer()
-    arg = inf.arg()
-    arg = arg.reshape(numpy.squeeze(gtImg).shape)
-
-    vigra.imshow(rgbImg)
-    vigra.show()
-
-    vigra.imshow(arg+2)
-    vigra.show()
-    break
diff --git a/fubar/pascal_voc/convertPascalVOC.py b/fubar/pascal_voc/convertPascalVOC.py
deleted file mode 100644
index ef3b9c4..0000000
--- a/fubar/pascal_voc/convertPascalVOC.py
+++ /dev/null
@@ -1,94 +0,0 @@
-import cPickle as pickle
-import numpy as np
-import opengm
-from opengm import learning
-import h5py
-
-# download the Pascal VOC datasets from
-# 
-# http://www.ais.uni-bonn.de/deep_learning/downloads.html
-# 
-
-# converter from pystruct EdgeFeatureGraphCRF to opengm learnable
-fns = ['./data_train.pickle', './data_val.pickle']
-ds_suffixes = ['_train', '_val']
-
-num_samples = 5 # None for all
-out_dir = './'
-out_prefix = 'pascal_voc'
-
-num_labels = 21
-
-# get label weights from training set:
-# loss should be weighted inversely by the number of 
-# occurrences of each class in the training set    
-Y = pickle.load(open(fns[0], 'r'))['Y'][:num_samples]
-Y = np.hstack(Y)
-Y[Y==-1] = 0  # FIXME: introduce a void label; for now, map the void label to background
-label_weights = 1. / np.bincount(Y)
-label_weights[np.isinf(label_weights)] = 0
-label_weights *= 1. / np.sum(label_weights)
-
-for fn, ds_suffix in zip(fns, ds_suffixes):
-    ds = pickle.load(open(fn, 'r'))
-
-    # X is a list of instances of a dataset where (for EdgeFeatureGraphCRF)
-    # each instance is a tuple of (unary_feats, edges, edge_feats)
-    X = ds['X'][:num_samples]
-
-    # the ground truth labels
-    Y = ds['Y'][:num_samples]
-
-    # superpixels (for reference)
-    #superpixels_train = ds['superpixels'][:num_samples]
-
-    # filenames (for reference)
-    #fns_train = ds['file_names'][:num_samples]
-
-    num_edge_feats = X[0][2].shape[1]
-    num_unary_feats = num_labels * X[0][0].shape[1]
-    num_weights = num_unary_feats + num_edge_feats
-    # create and initialize weights
-    print 'num_weights =', num_weights
-    print 'num_instances =', len(X)
-    ogm_ds = learning.createDataset(num_weights, numInstances=len(X), loss="generalized-hamming")
-    weights = ogm_ds.getWeights()
-
-    for idx, (x, y) in enumerate(zip(X, Y)):
-        y[y==-1] = 0  # FIXME: introduce a void label; for now, map the void label to background
-        unary_feats, edges, edge_feats = x
-        num_vars = unary_feats.shape[0]
-
-        states = np.ones(num_vars, dtype=opengm.index_type) * num_labels
-        
-        gm = opengm.graphicalModel(states, operator='adder')
-
-        lossParam = learning.GeneralizedHammingLossParameter()
-        lossParam.setLabelLossMultiplier(np.array(label_weights))
-
-        # add unary factors
-        weight_ids = np.arange(0, num_labels * unary_feats.shape[1]).reshape((num_labels, -1))
-        for feat_idx, unary_feat in enumerate(unary_feats):
-            # make each label see all features, but use its own weights
-            unary_feat_array = np.repeat(unary_feat.reshape((-1,1)), num_labels, axis=1)
-            f = learning.lUnaryFunction(weights, num_labels, unary_feat_array, weight_ids)
-            var_idxs = np.array([feat_idx], dtype=np.uint64)
-            fid = gm.addFunction(f)
-            gm.addFactor(fid, var_idxs)
-        #var_idxs = np.arange(0, num_vars, dtype=np.uint64)
-        #gm.addFactors(fids, var_idxs)
-
-        # add pairwise factors
-        for edge, edge_feat in zip(edges, edge_feats):
-            var_idxs = edge.astype(opengm.index_type)
-            weight_ids = np.arange(num_unary_feats, num_unary_feats+num_edge_feats, dtype=opengm.index_type)
-            f = opengm.LPottsFunction(weights=weights, numberOfLabels=num_labels,
-                                      weightIds=weight_ids, features=edge_feat)
-            fid = gm.addFunction(f)
-            gm.addFactor(fid, var_idxs)
-
-        print idx, y.shape, lossParam
-        ogm_ds.setInstanceWithLossParam(idx, gm, y.astype(dtype=opengm.label_type), lossParam)
-
-    ogm_ds.save(out_dir, out_prefix + ds_suffix + '_')
-
diff --git a/fubar/pascal_voc/convertPascalVOCNew.py b/fubar/pascal_voc/convertPascalVOCNew.py
deleted file mode 100644
index b85b121..0000000
--- a/fubar/pascal_voc/convertPascalVOCNew.py
+++ /dev/null
@@ -1,108 +0,0 @@
-import cPickle as pickle
-import numpy as np
-import opengm
-from opengm import learning
-import h5py
-
-# download the Pascal VOC datasets from
-# 
-# http://www.ais.uni-bonn.de/deep_learning/downloads.html
-# 
-
-# converter from pystruct EdgeFeatureGraphCRF to opengm learnable
-fns = ['./data_train.pickle', './data_val.pickle']
-ds_suffixes = ['_train']#, '_val']
-ogm_dss = [None, None]
-ww = [None, None]
-num_samples = None
-out_dir = './'
-out_prefix = 'pascal_voc'
-
-num_labels = 21
-
-# get label weights from training set:
-# loss should be weighted inversely by the number of 
-# occurrences of each class in the training set    
-Y = pickle.load(open(fns[0], 'r'))['Y'][:num_samples]
-Y = np.hstack(Y)
-Y[Y==-1] = 0  # FIXME: introduce a void label; for now, map the void label to background
-label_weights = 1. / np.bincount(Y)
-label_weights = np.ones(27, dtype=opengm.value_type)  # overrides the inverse-frequency weights computed above; 27 looks like a leftover magic number (num_labels is 21)
-label_weights[np.isinf(label_weights)] = 0
-label_weights *= 1. / np.sum(label_weights)
-
-for ii, (fn, ds_suffix) in enumerate(zip(fns, ds_suffixes)):
-    ds = pickle.load(open(fn, 'r'))
-
-    # X is a list of instances of a dataset where (for EdgeFeatureGraphCRF)
-    # each instance is a tuple of (unary_feats, edges, edge_feats)
-    X = ds['X'][:num_samples]
-
-    # the ground truth labels
-    Y = ds['Y'][:num_samples]
-
-    # superpixels (for reference)
-    #superpixels_train = ds['superpixels'][:num_samples]
-
-    # filenames (for reference)
-    #fns_train = ds['file_names'][:num_samples]
-
-    num_edge_feats = X[0][2].shape[1]
-    num_unary_feats = num_labels * X[0][0].shape[1]
-    num_weights = num_unary_feats + num_edge_feats
-    # create and initialize weights
-    print 'num_weights =', num_weights
-    print 'num_instances =', len(X)
-
-    ogm_dss[ii] = learning.createDataset(num_weights, numInstances=len(X))
-    #ogm_ds = ogm_dss[ii]
-    ww[ii] = ogm_dss[ii].getWeights()
-
-    for idx, (x, y) in enumerate(zip(X, Y)):
-        print idx
-        y[y==-1] = 0  # FIXME: introduce a void label; for now, map the void label to background
-        unary_feats, edges, edge_feats = x
-        num_vars = unary_feats.shape[0]
-
-        states = np.ones(num_vars, dtype=opengm.label_type) * num_labels
-        
-        gm = opengm.gm(states, operator='adder')
-
-        lossParam =  learning.LossParameter(lossType='hamming', labelMult=label_weights)
-        lossParam.setLabelLossMultiplier(label_weights)
-
-        # add unary factors
-        weight_ids = np.arange(0, num_labels * unary_feats.shape[1]).reshape((num_labels, -1))
-
-            
-            
-        # The features differ between function instances,
-        # but within one instance a single feature vector is
-        # shared across all labels.
-        # The weights are the same for all function instances,
-        # but each label has its own set of weights.
-        lUnaries = learning.lUnaryFunctions(weights = ww[ii],numberOfLabels = num_labels, 
-                                            features=unary_feats,weightIds = weight_ids,
-                                            featurePolicy= learning.FeaturePolicy.sharedBetweenLabels)
-        gm.addFactors(gm.addFunctions(lUnaries), np.arange(num_vars)) 
-
-
-
-        # add all pairwise factors at once
-        weight_ids = np.arange(num_unary_feats, num_unary_feats+num_edge_feats)
-        lp = learning.lPottsFunctions(weights=ww[ii], numberOfLabels=num_labels,
-                                     features=edge_feats, weightIds=weight_ids)
-        gm.addFactors(gm.addFunctions(lp), edges) 
-
-
-        # add the model to the dataset
-        ogm_dss[ii].setInstanceWithLossParam(idx, gm, y.astype(dtype=opengm.label_type), lossParam)
-
-
-    ogm_dss[ii].save(out_dir, out_prefix + ds_suffix + '_')
-
-
-
-
-
-
diff --git a/fubar/pascal_voc/learnPascalVOC.py b/fubar/pascal_voc/learnPascalVOC.py
deleted file mode 100644
index c6f0201..0000000
--- a/fubar/pascal_voc/learnPascalVOC.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import opengm
-from opengm import learning
-import numpy as np
-
-out_dir = './'
-out_prefix = 'pascal_voc_train_'
-
-dataset = learning.createDataset(0, loss='gh')
-#dataset = learning.DatasetWithGeneralizedHammingLoss(0)
-dataset.load(out_dir, out_prefix)
-
-nWeights = dataset.getNumberOfWeights()
-print 'nWeights', nWeights
-print 'nModels', dataset.getNumberOfModels()
-
-# for grid search learner
-lowerBounds = np.ones(nWeights)*-1.0
-upperBounds = np.ones(nWeights)*1.0
-nTestPoints  =np.ones(nWeights).astype('uint64')*3
-
-#learner = learning.gridSearchLearner(dataset=dataset,lowerBounds=lowerBounds, upperBounds=upperBounds,nTestPoints=nTestPoints)
-learner = learning.structMaxMarginLearner(dataset, 1.0, 0.001, 0)
-
-learner.learn(infCls=opengm.inference.Icm,
-              parameter=opengm.InfParam())
-
-weights = dataset.getWeights()
-
-for w in range(nWeights):
-    print weights[w]
-
-for i in range(dataset.getNumberOfModels()):
-    print 'loss of', i, '=', dataset.getLoss(i,infCls=opengm.inference.Icm,parameter=opengm.InfParam())
-
-print 'total loss =', dataset.getLoss(i,infCls=opengm.inference.Icm,parameter=opengm.InfParam())  # note: i is the last loop index, so this repeats the last model's loss rather than an aggregate
diff --git a/fubar/python_stuff.py b/fubar/python_stuff.py
deleted file mode 100644
index 852ea8a..0000000
--- a/fubar/python_stuff.py
+++ /dev/null
@@ -1,102 +0,0 @@
-import opengm
-import numpy
-from opengm import learning
-np = numpy
-
-
-numLabels = 3
-numVar = 6
-
-
-
-
-#################################################################
-# add a unary function 
-##################################################################
-
-print opengm.learning.DatasetWithHammingLoss
-print opengm.learning.HammingLoss
-
-# make the gm
-space = numpy.ones(numVar)*numLabels
-gm = opengm.gm(space)
-
-
-
-weightVals = numpy.ones(100)*1.0
-weights = opengm.learning.Weights(weightVals)
-
-
-
-
-##################################################################
-# add a unary function 
-##################################################################
-features  = numpy.ones([numLabels, 2], dtype=opengm.value_type)
-weightIds = numpy.ones([numLabels, 2], dtype=opengm.index_type)
-
-# set up weight ids for each label
-weightIds[0,:] = [0, 1]
-weightIds[1,:] = [2, 3]
-weightIds[2,:] = [4, 5]
-
-print "add f"
-f = opengm.LUnaryFunction(weights=weights, numberOfLabels=numLabels, 
-                          weightIds=weightIds, features=features)
-print "add factor"
-fid = gm.addFunction(f)
-gm.addFactor(fid, [0])
-
-print "features",features
-print "unary",np.array(gm[0])
-
-weights[4] = 0.5
-print "unary",np.array(gm[0])
-
-
-##################################################################
-# add a unary function                                           
-##################################################################
-features  = [
-    numpy.array([1.0, 1.0],             dtype=opengm.value_type),
-    numpy.array([1.0, 1.0, 1.0],        dtype=opengm.value_type),
-    numpy.array([1.0, 1.0, 1.0, 1.0],   dtype=opengm.value_type)
-]
-
-weightIds  = [
-    numpy.array([6, 7],             dtype=opengm.index_type),
-    numpy.array([8, 9, 10],        dtype=opengm.index_type),
-    numpy.array([11, 12, 13, 14],   dtype=opengm.index_type)
-]
-
-
-print "add f"
-f = opengm.LUnaryFunction(weights=weights, numberOfLabels=numLabels, 
-                          weightIds=weightIds, features=features)
-print "add factor"
-fid = gm.addFunction(f)
-gm.addFactor(fid, [0])
-
-print "features",features
-print "unary",np.array(gm[1])
-
-
-print "unary",np.array(gm[1])
-
-
-
-
-
-##################################################################
-# add a potts function
-##################################################################
-features = numpy.array([1.0, 5.0]).astype(opengm.value_type)
-weightIds = numpy.array([6,7]).astype(opengm.index_type)
-f = opengm.LPottsFunction(weights=weights, numberOfLabels=numLabels, 
-                          weightIds=weightIds, features=features)
-
-# add factor
-fid = gm.addFunction(f)
-gm.addFactor(fid, [0,1])
-
-
diff --git a/fubar/real_example.py b/fubar/real_example.py
deleted file mode 100644
index f21fe37..0000000
--- a/fubar/real_example.py
+++ /dev/null
@@ -1,96 +0,0 @@
-import opengm
-import opengm.learning as learning
-from opengm import numpy
-import vigra
-
-nModels = 1
-nLables = 2 
-shape = [6, 6]
-numVar = shape[0]*shape[1]
-nWeights = 4
-
-def makeGt(shape):
-    gt=numpy.ones(shape,dtype='uint8')
-    gt[0:shape[0]/2,:] = 0
-    return gt
-
-uWeightIds = numpy.arange(4,dtype='uint64').reshape(2,2)
-print uWeightIds
-
-bWeightIds = numpy.array([4,5,6],dtype='uint64')
-
-dataset = learning.createDataset(numWeights=nWeights, loss='h')
-weights = dataset.getWeights()
-
-def makeFeatures(gt):
-    random  = numpy.random.rand(*gt.shape)-0.5
-    randGt = random + gt
-    feat = []
-    for sigma in [1.0, 1.5]:
-        feat.append(vigra.filters.gaussianSmoothing(randGt.astype('float32'),sigma) )
-
-    featB = []
-    for sigma in [1.0, 1.5]:
-        featB.append(vigra.filters.gaussianGradientMagnitude(randGt.astype('float32'),sigma) )
-
-
-
-    a =  numpy.rollaxis(numpy.array(feat), axis=0, start=3)
-    b =  numpy.rollaxis(numpy.array(featB), axis=0, start=3)
-    return a,b
-
-for mi in range(nModels):
-
-
-    gm = opengm.gm(numpy.ones(numVar)*nLables)
-    gt = makeGt(shape) 
-    gtFlat = gt.reshape([-1])
-
-    unaries,binaries = makeFeatures(gt)
-
-    # print unaries, binaries
-
-    for x in range(shape[0]):
-        for y in range(shape[1]):
-            uFeat = unaries[x,y,:].astype("float64")
-            uFeat = numpy.repeat(uFeat[:,numpy.newaxis],2,axis=1).T
-            uFeat[1,:]=1
-
-            lu = opengm.LUnaryFunction(weights=weights,numberOfLabels=nLables, features=uFeat, weightIds=uWeightIds)
-
-
-            fid= gm.addFunction(lu)
-            gm.addFactor(fid, y+x*shape[1])
-
-    if False:
-        for x in range(shape[0]):
-            for y in range(shape[1]):
-
-                if x+1 < shape[0]:
-                    bFeat = numpy.append(binaries[x,y,:], [1]).astype(opengm.value_type) +  numpy.append(binaries[x+1,y,:], [1]).astype(opengm.value_type)
-                    pf = opengm.LPottsFunction(weights=weights,numberOfLabels=nLables, features=bFeat, weightIds=bWeightIds)
-                    fid= gm.addFunction(pf)
-                    gm.addFactor(fid, [y+x*shape[1], y+(x+1)*shape[1]])
-                if y+1 < shape[1]:
-                    bFeat = numpy.append(binaries[x,y,:], [1]).astype(opengm.value_type) + numpy.append(binaries[x,y+1,:], [1]).astype(opengm.value_type)
-                    pf = opengm.LPottsFunction(weights=weights,numberOfLabels=nLables, features=bFeat, weightIds=bWeightIds)
-                    fid= gm.addFunction(pf)
-                    gm.addFactor(fid, [y+x*shape[1], y+1+x*shape[1]])
-
-    dataset.pushBackInstance(gm,gtFlat.astype(opengm.label_type))
-    backGt = dataset.getGT(0)
-
-
-# for grid search learner
-lowerBounds = numpy.ones(nWeights)*-1.0
-upperBounds = numpy.ones(nWeights)*1.0
-nTestPoints  =numpy.ones(nWeights).astype('uint64')*10
-
-learner = learning.gridSearchLearner(dataset=dataset,lowerBounds=lowerBounds, upperBounds=upperBounds,nTestPoints=nTestPoints)
-#learner = learning.structMaxMarginLearner(dataset, 1.0, 0.001, 0)
-
-learner.learn(infCls=opengm.inference.Icm, 
-              parameter=opengm.InfParam())
-
-for w in range(nWeights):
-    print weights[w]
diff --git a/fubar/real_example_2.py b/fubar/real_example_2.py
deleted file mode 100644
index e14c607..0000000
--- a/fubar/real_example_2.py
+++ /dev/null
@@ -1,150 +0,0 @@
-import opengm
-import opengm.learning as learning
-from opengm import numpy
-import vigra
-import pylab as plt
-import pylab
-
-#nModels = 20
-nModels = 2
-nLables = 2 
-#shape = [50, 50]
-shape = [16, 16]
-numVar = shape[0]*shape[1]
-
-sSmooth = [1.0,1.2, 1.5, 2.0, 3.0, 4.0]
-sGrad = [1.0, 1.5, 2.0, 4.0]
-
-nUWeights = len(sSmooth) + 1
-nBWeights = len(sGrad) + 1
-nWeights = nUWeights + nBWeights
-
-def makeGt(shape):
-    gt=numpy.ones(shape,dtype='uint8')
-    gt[0:shape[0]/2,:] = 0
-    return gt
- 
-
-
-weightVals = numpy.ones(nWeights)
-weights = opengm.learning.Weights(weightVals)
-
-uWeightIds = numpy.arange(nUWeights ,dtype='uint64')
-bWeightIds = numpy.arange(start=nUWeights,stop=nWeights,dtype='uint64')
-
-
-dataset = learning.createDataset(numWeights=nWeights)
-weights = dataset.getWeights()
-
-def makeFeatures(gt):
-    random  = (numpy.random.rand(*gt.shape)-0.5)*5.0
-    randGt = random + gt
-
-    # vigra.imshow(randGt)
-    # plt.colorbar()
-    # vigra.show()
-
-    #f = pylab.figure()
-    #for n, a in enumerate([gt, randGt]):
-    #    f.add_subplot(2, 1, n)  # this line outputs images on top of each other
-    #    # f.add_subplot(1, 2, n)  # this line outputs images side-by-side
-    #    pylab.imshow(a,cmap='gray')
-    #pylab.title('Double image')
-    #pylab.show()
-
-
-
-    feat = []
-    for sigma in sSmooth:
-        feat.append(vigra.filters.gaussianSmoothing(randGt.astype('float32'),sigma) )
-
-        #vigra.imshow(feat[-1])
-        #plt.colorbar()
-        #vigra.show()
-
-
-    featB = []
-    for sigma in sGrad:
-        featB.append(vigra.filters.gaussianGradientMagnitude(randGt.astype('float32'),sigma) )
-
-    a=None
-    b=None
-    if len(feat)>0:    
-        a =  numpy.rollaxis(numpy.array(feat), axis=0, start=3)
-    if len(featB)>0:
-        b =  numpy.rollaxis(numpy.array(featB), axis=0, start=3)
-    return a,b
-
-for mi in range(nModels):
-    #print mi
-
-    gm = opengm.gm(numpy.ones(numVar)*nLables)
-    gt = makeGt(shape) 
-    gtFlat = gt.reshape([-1])
-
-    unaries, binaries = makeFeatures(gt)
-
-    # print unaries, binaries
-
-
-    for x in range(shape[0]):
-        for y in range(shape[1]):
-            uFeat = numpy.append(unaries[x,y,:],[1])
-
-            #print uFeat
-            #print uWeightIds
-            #print(unaries[x,y,:])
-            #print(unaries.shape)
-            #print(uFeat)
-            #print(uFeat.shape)
-
-            lu = learning.lUnaryFunction(weights=weights,numberOfLabels=nLables, 
-                                         features=uFeat, weightIds=uWeightIds)
-            fid = gm.addFunction(lu)
-            facIndex = gm.addFactor(fid, y+x*shape[1])
-            #facIndex = gm.addFactor(fid, x+y*shape[0])
-
-    if True:
-        for x in range(shape[0]):
-            for y in range(shape[1]):
-
-                if x+1 < shape[0]:
-                    bFeat = numpy.append(binaries[x,y,:], [1])+numpy.append(binaries[x+1,y,:], [1])
-                    pf = opengm.LPottsFunction(weights=weights,numberOfLabels=nLables, features=bFeat, weightIds=bWeightIds)
-                    fid= gm.addFunction(pf)
-                    gm.addFactor(fid, [y+x*shape[1], y+(x+1)*shape[1]])
-                if y+1 < shape[1]:
-                    bFeat = numpy.append(binaries[x,y,:], [1]).astype(opengm.value_type) + numpy.append(binaries[x,y+1,:], [1]).astype(opengm.value_type)
-                    pf = opengm.LPottsFunction(weights=weights,numberOfLabels=nLables, features=bFeat, weightIds=bWeightIds)
-                    fid= gm.addFunction(pf)
-                    gm.addFactor(fid, [y+x*shape[1], y+1+x*shape[1]])
-
-    dataset.pushBackInstance(gm,gtFlat.astype(opengm.label_type))
-    backGt = dataset.getGT(0)
-
-    #print "back",backGt
-    #sys.exit()
-
-# for grid search learner
-lowerBounds = numpy.ones(nWeights)*-2.0
-upperBounds = numpy.ones(nWeights)*2.0
-nTestPoints  =numpy.ones(nWeights).astype('uint64')*5
-
-#learner = learning.gridSearchLearner(dataset=dataset,lowerBounds=lowerBounds, upperBounds=upperBounds,nTestPoints=nTestPoints)
-#learner =  learning.structPerceptron(dataset, decayExponent=-0.5, learningMode='batch')
-#learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
-#learner =  learning.subgradientSSVM(dataset, learningRate=1.0, C=100, learningMode='batch')
-
-
-#learner.learn(infCls=opengm.inference.TrwsExternal,
-#              parameter=opengm.InfParam())
-
-
-learner = learning.maxLikelihoodLearner(
-    dataset,
-    maximumNumberOfIterations=1000,
-    gradientStepSize=0.9,
-    weightStoppingCriteria=0.001,
-    gradientStoppingCriteria=0.01,
-    infoFlag=True,
-    infoEveryStep=False,
-    weightRegularizer=1.0,
-    beliefPropagationMaximumNumberOfIterations=5,
-    beliefPropagationConvergenceBound=0.0001,
-    beliefPropagationDamping=0.5,
-    beliefPropagationTemperature=0.3,
-    beliefPropagationIsAcyclic=opengm.Tribool(False))
-learner.learn()
-
-for w in range(nWeights):
-    print weights[w]
diff --git a/fubar/simple_sp.py b/fubar/simple_sp.py
deleted file mode 100644
index 1d9c5ea..0000000
--- a/fubar/simple_sp.py
+++ /dev/null
@@ -1,165 +0,0 @@
-import numpy
-import opengm
-from opengm import learning
-import vigra
-from progressbar import *
-import glob
-import os
-from functools import partial
-from opengm.learning import secondOrderImageDataset, getPbar,superpixelDataset
-
-
-
-
-nImages = 20 
-shape = [100, 100]
-noise = 8
-imgs = []
-gts = []
-sps = []
-
-
-
-pbar = getPbar((nImages), 'Load Image')
-pbar.start()
-
-for i in range(nImages):
-
-    gtImg = numpy.zeros(shape)
-    gtImg[0:shape[0]/2,:] = 1
-
-    gtImg[shape[0]/4: 3*shape[0]/4, shape[0]/4: 3*shape[0]/4]  = 2
-
-    ra = numpy.random.randint(180)
-    #print ra 
-
-    gtImg = vigra.sampling.rotateImageDegree(gtImg.astype(numpy.float32),int(ra),splineOrder=0)
-
-    if i<1 :
-        vigra.imshow(gtImg)
-        vigra.show()
-
-    img = gtImg + numpy.random.random(shape)*float(noise)
-    if i<1 :
-        vigra.imshow(img)
-        vigra.show()
-
-
-
-    sp,nSeg  = vigra.analysis.slicSuperpixels(gtImg, intensityScaling=0.2, seedDistance=5)
-    sp = vigra.analysis.labelImage(sp)-1
-
-
-    if i<1:
-        vigra.segShow(img, sp+1,edgeColor=(1,0,0))
-        vigra.show()
-
-
-    gg  = vigra.graphs.gridGraph(gtImg.shape[0:2])
-    rag = vigra.graphs.regionAdjacencyGraph(gg,sp)
-
-    gt,qtq = rag.projectBaseGraphGt(gtImg)
-
-    #rag.show(img, gt)
-    #vigra.show()
-
-
-    imgs.append(img.astype('float32'))
-    gts.append(gt)
-    sps.append(sp)
-
-    pbar.update(i)
-
-
-pbar.finish()
-
-def getSelf(img):
-    return img
-
-
-def labHessianOfGaussian(img, sigma):
-    l = vigra.colors.transform_RGB2Lab(img)[:,:,0]
-    l = vigra.taggedView(l,'xy')
-    return vigra.filters.hessianOfGaussianEigenvalues(l, sigma)
-
-def labStructTensorEv(img, sigma):
-    l = vigra.colors.transform_RGB2Lab(img)[:,:,0]
-    l = vigra.taggedView(l,'xy')
-    return vigra.filters.structureTensorEigenvalues(l, sigma, 2*sigma)
-
-
-def getSelf(img):
-    return img
-
-
-def getSpecial(img, sigma):
-    simg = vigra.filters.gaussianSmoothing(img, sigma=sigma)
-
-    img0  = simg**2
-    img1  = (simg - 1.0)**2
-    img2  = (simg - 2.0)**2
-
-    img0=img0[:,:,None]
-    img1=img1[:,:,None]
-    img2=img2[:,:,None]
-
-
-    return numpy.concatenate([img0,img1,img2],axis=2)
-
-
-fUnary = [
-    partial(getSpecial, sigma=0.5),
-    partial(getSpecial, sigma=1.0)
-]
-
-fBinary = [
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=0.5),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=1.0),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=1.5),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=2.0),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=3.0),
-]
-
-
-dataset,test_set = superpixelDataset(imgs=imgs,sps=sps, gts=gts, numberOfLabels=3, 
-                                          fUnary=fUnary, fBinary=fBinary, 
-                                          addConstFeature=True)
-if True :
-    dataset.save("simple_dataset", 'simple_')
-if True :
-    dataset = learning.createDataset(0,  numInstances=0)
-    dataset.load("simple_dataset", 'simple_')
-if True:
-
-    learner =  learning.subgradientSSVM(dataset, learningRate=0.1, C=100, 
-                                        learningMode='batch',maxIterations=1000, averaging=-1)
-    learner.learn(infCls=opengm.inference.TrwsExternal, 
-                  parameter=opengm.InfParam())
-
-else:
-    learner = learning.maxLikelihoodLearner(dataset, temp=0.0000001)
-    learner.learn()
-# predict on the test set
-for (rgbImg, sp, gm) in test_set :
-    # infer for test image
-    inf = opengm.inference.TrwsExternal(gm)
-    inf.infer()
-    arg = inf.arg()+1
-
-
-
-    assert sp.min() == 0
-    assert sp.max() == arg.shape[0] -1
-
-    gg  = vigra.graphs.gridGraph(rgbImg.shape[0:2])
-    rag = vigra.graphs.regionAdjacencyGraph(gg,sp)
-
-    seg = rag.projectLabelsToBaseGraph(arg.astype('uint32'))
-
-    vigra.imshow(rgbImg)
-    vigra.show()
-
-
-    vigra.imshow(seg)
-    vigra.show()
-
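The script above relies on rag.projectBaseGraphGt to turn pixel-level ground truth into one label per superpixel node. A rough numpy sketch of the majority-vote idea, under the assumption that this is what the projection computes for hard labels:

    import numpy

    def project_gt_to_superpixels(gt, sp, n_sp):
        # majority vote of the pixel labels inside each superpixel
        node_gt = numpy.zeros(n_sp, dtype=numpy.uint32)
        for s in range(n_sp):
            labels = gt[sp == s].astype(numpy.int64)
            node_gt[s] = numpy.bincount(labels).argmax()
        return node_gt

    gt = numpy.array([[0, 0, 1], [0, 1, 1]])
    sp = numpy.array([[0, 0, 1], [0, 1, 1]])
    print project_gt_to_superpixels(gt, sp, 2)   # -> [0 1]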
diff --git a/fubar/toy_dataset.py b/fubar/toy_dataset.py
deleted file mode 100644
index acd70e1..0000000
--- a/fubar/toy_dataset.py
+++ /dev/null
@@ -1,125 +0,0 @@
-import numpy
-import opengm
-from opengm import learning
-import vigra
-from progressbar import *
-import glob
-import os
-from functools import partial
-from opengm.learning import secondOrderImageDataset, getPbar
-
-numpy.random.seed(42)
-
-nImages = 8 
-shape = [30, 30]
-noise = 2.0
-imgs = []
-gts = []
-
-
-for i in range(nImages):
-
-    gtImg = numpy.zeros(shape)
-    gtImg[0:shape[0]/2,:] = 1
-
-    gtImg[shape[0]/4: 3*shape[0]/4, shape[0]/4: 3*shape[0]/4]  = 2
-
-    ra = numpy.random.randint(180)
-    #print ra 
-
-    gtImg = vigra.sampling.rotateImageDegree(gtImg.astype(numpy.float32),int(ra),splineOrder=0)
-
-    if i<2 :
-        vigra.imshow(gtImg)
-        vigra.show()
-
-    img = gtImg + numpy.random.random(shape)*float(noise)
-    if i<2 :
-        vigra.imshow(img)
-        vigra.show()
-
-    imgs.append(img.astype('float32'))
-    gts.append(gtImg)
-
-
-
-
-
-
-
-def getSelf(img):
-    return img
-
-
-def getSpecial(img, sigma):
-    simg = vigra.filters.gaussianSmoothing(img, sigma=sigma)
-
-    img0  = simg**2
-    img1  = (simg - 1.0)**2
-    img2  = (simg - 2.0)**2
-
-    img0=img0[:,:,None]
-    img1=img1[:,:,None]
-    img2=img2[:,:,None]
-
-
-    return numpy.concatenate([img0,img1,img2],axis=2)
-
-
-fUnary = [
-    partial(getSpecial, sigma=0.5),
-    partial(getSpecial, sigma=1.0)
-]
-
-fBinary = [
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=0.5),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=1.0),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=1.5),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=2.0),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=3.0),
-]
-
-
-dataset,test_set = secondOrderImageDataset(imgs=imgs, gts=gts, numberOfLabels=3, 
-                                          fUnary=fUnary, fBinary=fBinary, 
-                                          addConstFeature=True)
-
-
-
-
-
-
-
-learningModi = ['normal','reducedinference','selfFusion','reducedinferenceSelfFusion']
-lm = 0
-
-
-infCls = opengm.inference.TrwsExternal
-param = opengm.InfParam()
-
-if False:
-    print "construct learner"
-    learner = learning.maxLikelihoodLearner(dataset)
-    print "start to learn"
-    learner.learn()
-    print "exit"
-
-else:
-   learner =  learning.subgradientSSVM(dataset, learningRate=0.5, C=100, learningMode='batch',maxIterations=500,averaging=-1,nConf=0)
-   learner.learn(infCls=infCls,parameter=param,connectedComponents=True,infMode='n')
-
-
-# predict on the test set
-for (rgbImg, gtImg, gm) in test_set :
-    # infer for test image
-    inf = opengm.inference.TrwsExternal(gm)
-    inf.infer()
-    arg = inf.arg()
-    arg = arg.reshape( numpy.squeeze(gtImg.shape))
-
-    vigra.imshow(rgbImg)
-    vigra.show()
-
-    vigra.imshow(arg+2)
-    vigra.show()
-
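getSpecial above builds one feature channel per label as the squared distance of the smoothed intensity to that label's prototype value 0, 1 or 2, so a small value acts as strong unary evidence for that label. The same idea without the Gaussian smoothing, as a plain numpy sketch:

    import numpy

    def get_special(img, labels=(0.0, 1.0, 2.0)):
        # per-pixel squared distance to each label's prototype intensity
        return numpy.dstack([(img - k) ** 2 for k in labels])

    img = numpy.array([[0.1, 1.9], [1.1, 0.4]])
    feats = get_special(img)
    print feats.argmin(axis=2)   # -> [[0 2] [1 0]]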
diff --git a/fubar/whorse.py b/fubar/whorse.py
deleted file mode 100644
index a8f7851..0000000
--- a/fubar/whorse.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import numpy
-import opengm
-from opengm import learning
-import vigra
-from progressbar import *
-
-from functools import partial
-from make_grid_potts_dset import secondOrderImageDataset
-
-# where is the dataset stored
-dsetRoot = '/home/tbeier/datasets/weizmann_horse_db/'
-imgPath = dsetRoot + 'rgb/'
-gtPath = dsetRoot + 'figure_ground/'
-    
-# how many should be loaded
-# (all if None)
-loadN = 20
-takeNth  = 3
-if loadN is None:
-    loadN = 0
-
-imgs = []
-gt = []
-
-for i in range(1,loadN+1):
-
-    hName = "horse%03d.jpg" % (i,)
-    rgbImg  = vigra.impex.readImage(imgPath+hName)
-    gtImg  = vigra.impex.readImage(gtPath+hName).astype('uint32')[::takeNth,::takeNth]
-    gtImg[gtImg<125] = 0
-    gtImg[gtImg>=125] = 1
-    rgbImg = vigra.resize(rgbImg, [gtImg.shape[0],gtImg.shape[1]])
-    imgs.append(rgbImg)
-    gt.append(gtImg)
-
-
-fUnary = [
-    vigra.colors.transform_RGB2Lab,
-    vigra.colors.transform_RGB2Luv,
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=1.0),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=2.0),
-]
-
-fBinary = [
-    vigra.colors.transform_RGB2Lab,
-    vigra.colors.transform_RGB2Luv,
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=1.0),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=2.0),
-]
-
-
-dataset,test_set = secondOrderImageDataset(imgs=imgs, gts=gt, numberOfLabels=2, 
-                                          fUnary=fUnary, fBinary=fBinary, 
-                                          addConstFeature=False)
-
-
-
-
-learner =  learning.subgradientSSVM(dataset, learningRate=0.1, C=100, 
-                                    learningMode='batch',maxIterations=10)
-
-
-
-learner.learn(infCls=opengm.inference.QpboExternal, 
-              parameter=opengm.InfParam())
-
-
-
-# predict on the test set
-for (rgbImg, gtImg, gm) in test_set :
-    # infer for test image
-    inf = opengm.inference.QpboExternal(gm)
-    inf.infer()
-    arg = inf.arg()
-    arg = arg.reshape( numpy.squeeze(gtImg.shape))
-
-    vigra.segShow(rgbImg, arg+2)
-    vigra.show()
-
diff --git a/include/opengm/datastructures/marray/marray_hdf5.hxx b/include/opengm/datastructures/marray/marray_hdf5.hxx
index 8e292f1..6db10fb 100644
--- a/include/opengm/datastructures/marray/marray_hdf5.hxx
+++ b/include/opengm/datastructures/marray/marray_hdf5.hxx
@@ -74,8 +74,6 @@ template<class T>
     void load(const hid_t&, const std::string&, Marray<T>&);
 template<class T>
     void loadShape(const hid_t&, const std::string&, Vector<T>&);
-template<class T>
-    void loadVec(const hid_t&, const std::string&, std::vector<T>&);
 template<class T, class BaseIterator, class ShapeIterator>
     void loadHyperslab(const hid_t&, const std::string&,
         BaseIterator, BaseIterator, ShapeIterator, Marray<T>&);
@@ -513,29 +511,6 @@ void loadShape(
     handleCheck.check();
 }
 
-/// Load an HDF5 dataset into a std::vector.
-///
-/// \param groupHandle Handle of the parent HDF5 file or group.
-/// \param datasetName Name of the HDF5 dataset.
-/// \param out Output vector.
-///
-/// \sa load()
-///
-template<class T>
-void loadVec(
-    const hid_t& groupHandle,
-    const std::string& datasetName,
-    std::vector<T>& out
-)
-{
-    marray::Marray<T> v;
-    load( groupHandle,datasetName,v);
-    out.resize(v.size());
-    for(size_t j=0; j<v.size(); ++j) {
-       out[j] = v(j);
-    }
-}
-
 /// Load a hyperslab from an HDF5 dataset into an Marray.
 /// 
 /// \param groupHandle Handle of the parent HDF5 file or group.
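The removed loadVec helper read a 1-D dataset through a temporary Marray and copied it element-wise into a std::vector. The equivalent from Python, assuming h5py is available, is a one-liner:

    import h5py

    def load_vec(path, dataset_name):
        # read a 1-D HDF5 dataset into a plain Python list, mirroring
        # the removed loadVec() helper
        with h5py.File(path, 'r') as f:
            return f[dataset_name][()].tolist()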
diff --git a/include/opengm/functions/function_properties_base.hxx b/include/opengm/functions/function_properties_base.hxx
index 68882d5..41f4130 100644
--- a/include/opengm/functions/function_properties_base.hxx
+++ b/include/opengm/functions/function_properties_base.hxx
@@ -18,7 +18,7 @@
 #include "opengm/operations/adder.hxx"
 #include "opengm/operations/integrator.hxx"
 #include "opengm/operations/multiplier.hxx"
-#include "opengm/graphicalmodel/weights.hxx"
+#include "opengm/graphicalmodel/parameters.hxx"
 
 #define OPENGM_FLOAT_TOL 0.000001
 
@@ -55,7 +55,6 @@ public:
    bool isAbsoluteDifference() const;
    bool isTruncatedAbsoluteDifference() const;
    bool isLinearConstraint() const;
-
    
    /// find minimum and maximum of the function in a single sweep
    /// \return class holding the minimum and the maximum
@@ -128,16 +127,12 @@ public:
    FunctionShapeIteratorType functionShapeBegin() const;
    FunctionShapeIteratorType functionShapeEnd() const;
 
-   size_t numberOfWeights()const{
+   size_t numberOfParameters()const{
       return 0;
    }
-   INDEX weightIndex(const size_t weightNumber)const{
+   INDEX parameterIndex(const size_t paramNumber)const{
      throw RuntimeError("Function base has no parameters; this needs to be implemented in any function type");
    }
-   void setWeights(const opengm::learning::Weights<VALUE>& /*weights*/) const {}
-   template<class ITERATOR> 
-   VALUE weightGradient(size_t,ITERATOR) const {return 0;}
-
 };
 
 
diff --git a/include/opengm/functions/l_potts.hxx b/include/opengm/functions/l_potts.hxx
new file mode 100644
index 0000000..8694859
--- /dev/null
+++ b/include/opengm/functions/l_potts.hxx
@@ -0,0 +1,202 @@
+#pragma once
+#ifndef OPENGM_L_POTTS_FUNCTION_HXX
+#define OPENGM_L_POTTS_FUNCTION_HXX
+
+#include <algorithm>
+#include <vector>
+#include <cmath>
+
+#include "opengm/opengm.hxx"
+#include "opengm/functions/function_registration.hxx"
+#include "opengm/functions/function_properties_base.hxx"
+
+namespace opengm {
+
+/// Potts function for two variables whose value for unequal labels is read from a shared parameter vector
+///
+/// \ingroup functions
+template<class T, class I = size_t, class L = size_t>
+class LPottsFunction
+: public FunctionBase<LPottsFunction<T, I, L>, T, I, L>
+{
+public:
+   typedef T ValueType;
+   typedef L LabelType;
+   typedef I IndexType;
+
+   LPottsFunction(
+      const LabelType,
+      const LabelType,
+      const Parameters<ValueType,IndexType> & parameters,
+      const IndexType valueNotEqual
+   );
+   LabelType shape(const size_t) const;
+   size_t size() const;
+   size_t dimension() const;
+   template<class ITERATOR> ValueType operator()(ITERATOR) const;
+   bool operator==(const LPottsFunction& ) const;
+   // specializations
+   bool isPotts() const;
+   bool isGeneralizedPotts() const;
+   ValueType min() const;
+   ValueType max() const;
+   ValueType sum() const;
+   ValueType product() const;
+   MinMaxFunctor<ValueType> minMax() const;
+
+   // parameters
+   size_t numberOfParameters()const{
+      return 1;
+   }
+   IndexType parameterIndex(const size_t paramNumber)const{
+      return piValueNotEqual_;
+   }
+
+
+private:
+   LabelType numberOfLabels1_;
+   LabelType numberOfLabels2_;
+
+   const Parameters<ValueType,IndexType> * params_;
+
+   IndexType piValueNotEqual_;
+
+friend class FunctionSerialization<LPottsFunction<T, I, L> > ;
+};
+
+
+template<class T, class I, class L>
+struct FunctionRegistration<LPottsFunction<T, I, L> > {
+   enum ID {
+      Id = opengm::FUNCTION_TYPE_ID_OFFSET + 100 + 6
+   };
+};
+
+
+
+
+
+template <class T, class I, class L>
+inline
+LPottsFunction<T, I, L>::LPottsFunction
+(
+   const L numberOfLabels1,
+   const L numberOfLabels2,
+   const Parameters<ValueType,IndexType> & parameters,
+   const IndexType valueNotEqual
+)
+:  numberOfLabels1_(numberOfLabels1),
+   numberOfLabels2_(numberOfLabels2),
+   params_(&parameters),
+   piValueNotEqual_(valueNotEqual)
+{}
+
+template <class T, class I, class L>
+template <class ITERATOR>
+inline T
+LPottsFunction<T, I, L>::operator()
+(
+   ITERATOR begin
+) const {
+   return (begin[0]==begin[1] ? 
+      static_cast<ValueType>(0.0) : params_->getParameter(piValueNotEqual_) );
+}
+
+
+
+template <class T, class I, class L>
+inline L
+LPottsFunction<T, I, L>::shape
+(
+   const size_t i
+) const {
+   OPENGM_ASSERT(i < 2);
+   return (i==0 ? numberOfLabels1_ : numberOfLabels2_);
+}
+
+template <class T, class I, class L>
+inline size_t
+LPottsFunction<T, I, L>::dimension() const {
+   return 2;
+}
+
+template <class T, class I, class L>
+inline size_t
+LPottsFunction<T, I, L>::size() const {
+   return numberOfLabels1_*numberOfLabels2_;
+}
+
+
+template<class T, class I, class L>
+inline bool
+LPottsFunction<T, I, L>::operator==
+(
+   const LPottsFunction & fb
+   )const{
+   return  numberOfLabels1_ == fb.numberOfLabels1_ &&
+      numberOfLabels2_ == fb.numberOfLabels2_ &&
+      piValueNotEqual_   == fb.piValueNotEqual_;
+}
+
+
+template<class T, class I, class L>
+inline bool
+LPottsFunction<T, I, L>::isPotts() const
+{
+   return true;
+}
+
+template<class T, class I, class L>
+inline bool
+LPottsFunction<T, I, L>::isGeneralizedPotts() const
+{
+   return true;
+}
+
+template<class T, class I, class L>
+inline typename LPottsFunction<T, I, L>::ValueType
+LPottsFunction<T, I, L>::min() const
+{
+   const T val = params_->getParameter(piValueNotEqual_);
+   return 0.0<val ? 0.0 :val;
+}
+
+template<class T, class I, class L>
+inline typename LPottsFunction<T, I, L>::ValueType
+LPottsFunction<T, I, L>::max() const
+{
+  const T val = params_->getParameter(piValueNotEqual_);
+  return 0.0>val ? 0.0 :val;
+}
+
+template<class T, class I, class L>
+inline typename LPottsFunction<T, I, L>::ValueType
+LPottsFunction<T, I, L>::sum() const
+{
+    const T val = params_->getParameter(piValueNotEqual_);
+    const LabelType minLabels = std::min(numberOfLabels1_, numberOfLabels2_);
+    return val * static_cast<T>(numberOfLabels1_ * numberOfLabels2_ - minLabels);
+}
+
+template<class T, class I, class L>
+inline typename LPottsFunction<T, I, L>::ValueType
+LPottsFunction<T, I, L>::product() const
+{
+   return static_cast<ValueType>(0);
+}
+
+template<class T, class I, class L>
+inline MinMaxFunctor<typename LPottsFunction<T, I, L>::ValueType>
+LPottsFunction<T, I, L>::minMax() const
+{
+   const T val = params_->getParameter(piValueNotEqual_);
+   if(static_cast<ValueType>(0) < val) {
+      return MinMaxFunctor<T>(static_cast<ValueType>(0), val);
+   } else {
+      return MinMaxFunctor<T>(val, static_cast<ValueType>(0));
+   }
+}
+
+} // namespace opengm
+
+#endif // #ifndef OPENGM_L_POTTS_FUNCTION_HXX
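The value table encoded by LPottsFunction is zero on the diagonal and the shared parameter everywhere else, which is also where the closed form in sum() comes from: of the n1*n2 entries, only the min(n1, n2) diagonal ones contribute nothing. A small Python check of that identity:

    def l_potts(u, v, theta):
        # f(u, v) = 0 if u == v else theta, with theta read from a shared
        # parameter vector in the C++ class above
        return 0.0 if u == v else theta

    n1, n2, theta = 3, 3, 0.5
    assert sum(l_potts(u, v, theta) for u in range(n1) for v in range(n2)) \
        == theta * (n1 * n2 - min(n1, n2))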
diff --git a/include/opengm/functions/learnable/lpotts.hxx b/include/opengm/functions/learnable/lpotts.hxx
deleted file mode 100644
index a9938c6..0000000
--- a/include/opengm/functions/learnable/lpotts.hxx
+++ /dev/null
@@ -1,254 +0,0 @@
-#pragma once
-#ifndef OPENGM_LEARNABLE_POTTS_FUNCTION_HXX
-#define OPENGM_LEARNABLE_POTTS_FUNCTION_HXX
-
-#include <algorithm>
-#include <vector>
-#include <cmath>
-
-#include "opengm/opengm.hxx"
-#include "opengm/functions/function_registration.hxx"
-#include "opengm/functions/function_properties_base.hxx"
-#include "opengm/graphicalmodel/weights.hxx"
-
-namespace opengm {
-namespace functions {
-namespace learnable {
-
-/// Learnable feature function for two variables
-///
-/// f(u,v) = (\sum_i w_i * feat_i) I(u!=v)
-///  - w    = parameter vector
-///  - feat = feature vector
-///
-/// derive from this class and implement the function
-///   weightGradient(i,x) = A(x)_{i,*} * feat
-///  
-/// \ingroup functions
-template<class T, class I = size_t, class L = size_t>
-class LPotts
-   : public opengm::FunctionBase<opengm::functions::learnable::LPotts<T, I, L>, T, I, L>
-{
-public:
-   typedef T ValueType;
-   typedef L LabelType;
-   typedef I IndexType;
- 
-   LPotts();
-   LPotts(const opengm::learning::Weights<T>& weights,
-      const L numLabels,
-      const std::vector<size_t>& weightIDs,
-      const std::vector<T>& feat
-      );
-   LPotts(const L numLabels,
-      const std::vector<size_t>& weightIDs,
-      const std::vector<T>& feat
-      );
-   L shape(const size_t) const;
-   size_t size() const;
-   size_t dimension() const;
-   template<class ITERATOR> T operator()(ITERATOR) const;
- 
-   // parameters
-   void setWeights(const opengm::learning::Weights<T>& weights) const
-      {weights_ = &weights;}
-   size_t numberOfWeights()const
-     {return weightIDs_.size();}
-   I weightIndex(const size_t weightNumber) const
-     {return weightIDs_[weightNumber];} //dummy
-   template<class ITERATOR> 
-   T weightGradient(size_t,ITERATOR) const;
-
-   bool isPotts() const {return true;}
-   bool isGeneralizedPotts() const {return true;}
-
-protected:
-   mutable const opengm::learning::Weights<T> * weights_;
-   L numLabels_;
-   std::vector<size_t> weightIDs_;
-   std::vector<T> feat_;
-
-
-    friend class opengm::FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> >;
-};
-
-
-template <class T, class I, class L>
-inline
-LPotts<T, I, L>::LPotts
-( 
-   const opengm::learning::Weights<T>& weights,
-   const L numLabels,
-   const std::vector<size_t>& weightIDs,
-   const std::vector<T>& feat
-   )
-   :  weights_(&weights), numLabels_(numLabels), weightIDs_(weightIDs),feat_(feat)
-{
-  OPENGM_ASSERT( weightIDs_.size()==feat_.size() );
-}
-
-template <class T, class I, class L>
-inline
-LPotts<T, I, L>::LPotts
-( 
-   const L numLabels,
-   const std::vector<size_t>& weightIDs,
-   const std::vector<T>& feat
-   )
-   : numLabels_(numLabels), weightIDs_(weightIDs),feat_(feat)
-{
-  OPENGM_ASSERT( weightIDs_.size()==feat_.size() );
-}
-
-template <class T, class I, class L>
-inline
-LPotts<T, I, L>::LPotts
-( )
-   : numLabels_(0), weightIDs_(std::vector<size_t>(0)), feat_(std::vector<T>(0))
-{
-  OPENGM_ASSERT( weightIDs_.size()==feat_.size() );
-}
-
-
-template <class T, class I, class L>
-template <class ITERATOR>
-inline T
-LPotts<T, I, L>::weightGradient 
-(
-   size_t weightNumber,
-   ITERATOR begin
-) const {
-  OPENGM_ASSERT(weightNumber< numberOfWeights());
-  if( *(begin) != *(begin+1) )
-    return (*this).feat_[weightNumber];
-  return 0;
-}
-
-template <class T, class I, class L>
-template <class ITERATOR>
-inline T
-LPotts<T, I, L>::operator()
-(
-   ITERATOR begin
-) const {
-   T val = 0;
-   for(size_t i=0;i<numberOfWeights();++i){
-      val += weights_->getWeight(weightIDs_[i]) * weightGradient(i,begin);
-   }
-   return val;
-}
-
-
-template <class T, class I, class L>
-inline L
-LPotts<T, I, L>::shape
-(
-   const size_t i
-) const {
-   return numLabels_;
-}
-
-template <class T, class I, class L>
-inline size_t
-LPotts<T, I, L>::dimension() const {
-   return 2;
-}
-
-template <class T, class I, class L>
-inline size_t
-LPotts<T, I, L>::size() const {
-   return numLabels_*numLabels_;
-}
-
-} // namespace learnable
-} // namespace functions
-
-
-/// FunctionSerialization
-template<class T, class I, class L>
-class FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> > {
-public:
-   typedef typename opengm::functions::learnable::LPotts<T, I, L>::ValueType ValueType;
-
-   static size_t indexSequenceSize(const opengm::functions::learnable::LPotts<T, I, L>&);
-   static size_t valueSequenceSize(const opengm::functions::learnable::LPotts<T, I, L>&);
-   template<class INDEX_OUTPUT_ITERATOR, class VALUE_OUTPUT_ITERATOR>
-      static void serialize(const opengm::functions::learnable::LPotts<T, I, L>&, INDEX_OUTPUT_ITERATOR, VALUE_OUTPUT_ITERATOR);
-   template<class INDEX_INPUT_ITERATOR, class VALUE_INPUT_ITERATOR>
-      static void deserialize( INDEX_INPUT_ITERATOR, VALUE_INPUT_ITERATOR, opengm::functions::learnable::LPotts<T, I, L>&);
-};
-
-template<class T, class I, class L>
-struct FunctionRegistration<opengm::functions::learnable::LPotts<T, I, L> > {
-   enum ID {
-      Id = opengm::FUNCTION_TYPE_ID_OFFSET + 100 + 65
-   };
-};
-
-template<class T, class I, class L>
-inline size_t
-FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> >::indexSequenceSize
-(
-   const opengm::functions::learnable::LPotts<T, I, L> & src
-) {
-  return 2+src.weightIDs_.size();
-}
-
-template<class T, class I, class L>
-inline size_t
-FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> >::valueSequenceSize
-(
-   const opengm::functions::learnable::LPotts<T, I, L> & src
-) {
-  return src.feat_.size();
-}
-
-template<class T, class I, class L>
-template<class INDEX_OUTPUT_ITERATOR, class VALUE_OUTPUT_ITERATOR >
-inline void
-FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> >::serialize
-(
-   const opengm::functions::learnable::LPotts<T, I, L> & src,
-   INDEX_OUTPUT_ITERATOR indexOutIterator,
-   VALUE_OUTPUT_ITERATOR valueOutIterator
-) {
-   *indexOutIterator = src.numLabels_;
-   ++indexOutIterator; 
-   *indexOutIterator = src.feat_.size();
-   ++indexOutIterator;
-   for(size_t i=0; i<src.weightIDs_.size();++i){
-     *indexOutIterator = src.weightIndex(i);
-     ++indexOutIterator;
-   } 
-   for(size_t i=0; i<src.feat_.size();++i){
-     *valueOutIterator = src.feat_[i];
-     ++valueOutIterator;
-   }
-}
-
-template<class T, class I, class L>
-template<class INDEX_INPUT_ITERATOR, class VALUE_INPUT_ITERATOR >
-inline void
-FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> >::deserialize
-(
-   INDEX_INPUT_ITERATOR indexInIterator,
-   VALUE_INPUT_ITERATOR valueInIterator,
-   opengm::functions::learnable::LPotts<T, I, L> & dst
-) { 
-   dst.numLabels_=*indexInIterator;
-   ++ indexInIterator;
-   const size_t numW=*indexInIterator;
-   ++indexInIterator;
-   dst.feat_.resize(numW);
-   dst.weightIDs_.resize(numW);
-   for(size_t i=0; i<numW;++i){
-     dst.feat_[i]=*valueInIterator;
-     dst.weightIDs_[i]=*indexInIterator;
-     ++indexInIterator;
-     ++valueInIterator;
-   }
-}
-
-} // namespace opengm
-
-#endif // #ifndef OPENGM_LEARNABLE_FUNCTION_HXX
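The class comment above defines f(u,v) = (\sum_i w_i * feat_i) * I(u != v), with weightGradient returning feat_i whenever the two labels differ. A dictionary-based Python sketch of exactly that contract (names are illustrative, not the opengm API):

    def lpotts_value(u, v, weights, weight_ids, feat):
        # f(u, v) = (sum_i w_i * feat_i) * 1[u != v]
        if u == v:
            return 0.0
        return sum(weights[wid] * f for wid, f in zip(weight_ids, feat))

    def lpotts_weight_gradient(u, v, i, feat):
        # d f / d w_i = feat_i * 1[u != v]
        return feat[i] if u != v else 0.0

    weights = {3: 0.5, 7: -1.0}
    print lpotts_value(0, 1, weights, [3, 7], [2.0, 1.0])   # -> 0.0 (0.5*2 - 1.0*1)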
diff --git a/include/opengm/functions/learnable/lunary.hxx b/include/opengm/functions/learnable/lunary.hxx
deleted file mode 100644
index cc1b930..0000000
--- a/include/opengm/functions/learnable/lunary.hxx
+++ /dev/null
@@ -1,478 +0,0 @@
-#pragma once
-#ifndef OPENGM_LEARNABLE_UNARY_FUNCTION_HXX
-#define OPENGM_LEARNABLE_UNARY_FUNCTION_HXX
-
-#include <algorithm>
-#include <vector>
-#include <cmath>
-
-#include "opengm/opengm.hxx"
-#include "opengm/functions/function_registration.hxx"
-#include "opengm/functions/function_properties_base.hxx"
-#include "opengm/graphicalmodel/weights.hxx"
-
-namespace opengm {
-namespace functions {
-namespace learnable {
-
-
-
-
-
-template<class V, class I>
-struct FeaturesAndIndices{
-    std::vector<V> features;
-    std::vector<I> weightIds;
-};
-
-
-
-
-template<class T, class I , class L>
-class LUnary
-   : public opengm::FunctionBase<opengm::functions::learnable::LUnary<T, I, L>, T, I, L>
-{
-public:
-    typedef T ValueType;
-    typedef T V;
-    typedef L LabelType;
-    typedef I IndexType;
-
-    LUnary()
-    :  
-    weights_(NULL),
-    numberOfLabels_(0), 
-    offsets_(),
-    weightIds_(),
-    features_()
-    {
-
-    }
-
-    LUnary(
-        const opengm::learning::Weights<T>&     weights,
-        std::vector<FeaturesAndIndices<T, I> >  featuresAndIndicesPerLabel
-    );
-
-    LUnary(
-        const opengm::learning::Weights<T>& weights,    
-        const LabelType                     numberOfLabels,
-        marray::Marray< size_t >            weightIds,
-        marray::Marray< ValueType>          features,
-        const bool                          makeFirstEntryConst
-    );
-
-
-    L shape(const size_t) const;
-    size_t size() const;
-    size_t dimension() const;
-    template<class ITERATOR> T operator()(ITERATOR) const;
-
-    // parameters
-    void setWeights(const opengm::learning::Weights<T>& weights) const{
-        weights_ = &weights;
-    }
-
-    size_t numberOfWeights()const{
-        return weightIds_.size();
-    }
-
-    I weightIndex(const size_t weightNumber) const{
-        return weightIds_[weightNumber];
-    } 
-
-    template<class ITERATOR> 
-    T weightGradient(size_t,ITERATOR) const;
-
-private:
-
-
-protected:
-
-    size_t numWeightsForL(const LabelType l )const{
-        return offsets_[0*numberOfLabels_ + l];
-    }
-    size_t weightIdOffset(const LabelType l )const{
-        return offsets_[1*numberOfLabels_ + l];
-    }
-    size_t featureOffset(const LabelType l )const{
-        return offsets_[2*numberOfLabels_ + l];
-    }
-
-    mutable const opengm::learning::Weights<T> *    weights_;
-
-    IndexType numberOfLabels_;
-    std::vector<IndexType> offsets_;
-    std::vector<size_t> weightIds_;
-    std::vector<ValueType> features_;
-
-
-    friend class opengm::FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> >;
-
-
-};
-
-template <class T, class I, class L>
-LUnary<T, I, L>::LUnary(
-    const opengm::learning::Weights<T>& weights,    
-    const LabelType                     numberOfLabels,
-    marray::Marray< size_t >            weightIds,
-    marray::Marray< ValueType>          features,
-    const bool                          makeFirstEntryConst
-)
-:  
-weights_(&weights),
-numberOfLabels_(numberOfLabels), 
-offsets_(numberOfLabels*3),
-weightIds_(),
-features_()
-{
-    const size_t pFeatDim       = features.dimension();
-    const size_t pWeightIdDim   = weightIds.dimension();
-
-    OPENGM_CHECK_OP(weightIds.dimension(), ==, 2 , "wrong dimension");
-    OPENGM_CHECK_OP(weightIds.shape(0)+int(makeFirstEntryConst), ==, numberOfLabels , "wrong shape");
-
-
-    const size_t nWeights = weightIds.size();
-    weightIds_.resize(nWeights);
-
-    const size_t nFeat  = features.size();
-    features_.resize(nFeat);
-
-
-    OPENGM_CHECK_OP(features.dimension(), == , 1 , "feature dimension must be 1 ");
-    OPENGM_CHECK_OP(features.shape(0), == , weightIds.shape(1) , "feature dimension must be 1");
-
-    // copy features
-    for(size_t fi=0; fi<nFeat; ++fi){
-        features_[fi] = features(fi);
-    }
-
-    size_t nwForL = weightIds.shape(1);
-    size_t wOffset = 0;
-
-    if(makeFirstEntryConst){
-
-        OPENGM_CHECK_OP(numberOfLabels_-1, == , weightIds.shape(0),"internal error");
-
-        offsets_[0*numberOfLabels_ + 0] = 0;
-        offsets_[1*numberOfLabels_ + 0] = 0;
-        offsets_[2*numberOfLabels_ + 0] = 0;
-
-        for(LabelType l=1; l<numberOfLabels_; ++l){
-            offsets_[0*numberOfLabels_ + l] = nwForL;
-            offsets_[1*numberOfLabels_ + l] = wOffset;
-            offsets_[2*numberOfLabels_ + l] = 0;
-            // copy weight ids
-            for(size_t wi=0; wi<nwForL; ++wi){
-                weightIds_[wOffset + wi] = weightIds(l-1,wi);
-            }
-            wOffset += nwForL;
-        }
-    }
-    else{
-        OPENGM_CHECK_OP(numberOfLabels_, == , weightIds.shape(0),"internal error");
-        for(LabelType l=0; l<numberOfLabels_; ++l){
-
-            offsets_[0*numberOfLabels_ + l] = nwForL;
-            offsets_[1*numberOfLabels_ + l] = wOffset;
-            offsets_[2*numberOfLabels_ + l] = 0;
-            // copy weight ids
-            for(size_t wi=0; wi<nwForL; ++wi){
-                weightIds_[wOffset + wi] = weightIds(l,wi);
-            }
-            wOffset += nwForL;
-        }
-    }
-
-}
-
-template <class T, class I, class L>
-inline
-LUnary<T, I, L>::LUnary
-( 
-   const opengm::learning::Weights<T> & weights, 
-   std::vector<FeaturesAndIndices<V, I> >  featuresAndIndicesPerLabel 
-)
-:  
-weights_(&weights),
-numberOfLabels_(featuresAndIndicesPerLabel.size()), 
-offsets_(featuresAndIndicesPerLabel.size()*3),
-weightIds_(),
-features_()
-{
-
-    size_t fOffset = 0;
-    size_t wOffset = 0;
-
-
-    // fetch the offsets
-    for(size_t l=0; l<featuresAndIndicesPerLabel.size(); ++l){
-        const size_t nwForL  = featuresAndIndicesPerLabel[l].weightIds.size();
-        const size_t nfForL  = featuresAndIndicesPerLabel[l].features.size();
-        OPENGM_CHECK_OP(nwForL, == , nfForL, "number of features and weights "
-            "must be the same for a given label within this overload of LUnary<T, I, L>::LUnary");
-
-        offsets_[0*numberOfLabels_ + l] = nwForL;
-        offsets_[1*numberOfLabels_ + l] = wOffset;
-        offsets_[2*numberOfLabels_ + l] = fOffset;
-
-        wOffset += nwForL;
-        fOffset += nfForL;
-    }
-
-    weightIds_.resize(wOffset);
-    features_.resize(fOffset);
-
-    // write weightIDs and features
-    for(size_t l=0; l<featuresAndIndicesPerLabel.size(); ++l){
-        const size_t nwForL = numWeightsForL(l);
-        for(size_t i=0; i<nwForL; ++i){
-            weightIds_[weightIdOffset(l)+i] = featuresAndIndicesPerLabel[l].weightIds[i];
-            features_[featureOffset(l)+i] = featuresAndIndicesPerLabel[l].features[i];
-        }
-    }
-
-    // check that there are no duplicates
-    RandomAccessSet<size_t> idSet;
-    idSet.reserve(weightIds_.size());
-    idSet.insert(weightIds_.begin(), weightIds_.end());
-
-    OPENGM_CHECK_OP(idSet.size(), == , weightIds_.size(), "weightIds has duplicates");
-}
-
-
-
-template <class T, class I, class L>
-template <class ITERATOR>
-inline T
-LUnary<T, I, L>::weightGradient 
-(
-   size_t weightNumber,
-   ITERATOR begin
-) const {
-    OPENGM_CHECK_OP(weightNumber,<,numberOfWeights(), 
-        "weightNumber must be smaller than number of weights");
-    const LabelType l(*begin);
-    const size_t nwForL = numWeightsForL(l);
-    if(nwForL>0){
-        const size_t wiStart = weightIdOffset(l);
-        const size_t wiEnd   = weightIdOffset(l)+nwForL;
-        if(weightNumber >= wiStart && weightNumber < wiEnd ){
-            const size_t wii = weightNumber - wiStart;
-            return features_[featureOffset(l) + wii];
-        }
-    }
-    return static_cast<ValueType>(0);
-}
-
-template <class T, class I, class L>
-template <class ITERATOR>
-inline T
-LUnary<T, I, L>::operator()
-(
-   ITERATOR begin
-) const {
-
-    //std::cout<<"LUnary::operator()\n";
-    //OPENGM_CHECK_OP( int(weights_==NULL),==,int(false),"foo");
-    T val = 0;
-    const LabelType l(*begin);
-    const size_t nwForL = numWeightsForL(l);
-    //std::cout<<"nw for l "<<nwForL<<"\n";
-    //std::cout<<"wsize "<<weights_->size()<<"\n";
-
-    for(size_t i=0; i<nwForL; ++i){
-        //std::cout<<" i "<<i<<"\n";
-        //OPENGM_CHECK_OP(weightIdOffset(l)+i,<,weightIds_.size(),"foo");
-        //OPENGM_CHECK_OP(featureOffset(l)+i,<,features_.size(),"foo");
-        const size_t wi = weightIds_[weightIdOffset(l)+i];
-        //OPENGM_CHECK_OP(wi,<,weights_->size(),"foo");
-
-        val += weights_->getWeight(wi) * features_[featureOffset(l)+i];
-    }
-    //d::cout<<"LUnary::return operator()\n";
-    return val;
-}
-
-
-template <class T, class I, class L>
-inline L
-LUnary<T, I, L>::shape
-(
-   const size_t i
-) const {
-   return numberOfLabels_;
-}
-
-template <class T, class I, class L>
-inline size_t
-LUnary<T, I, L>::dimension() const {
-   return 1;
-}
-
-template <class T, class I, class L>
-inline size_t
-LUnary<T, I, L>::size() const {
-   return numberOfLabels_;
-}
-
-} // namespace learnable
-} // namespace functions
-
-
-/// FunctionSerialization
-template<class T, class I, class L>
-class FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> > {
-public:
-   typedef typename opengm::functions::learnable::LUnary<T, I, L>::ValueType ValueType;
-
-   static size_t indexSequenceSize(const opengm::functions::learnable::LUnary<T, I, L>&);
-   static size_t valueSequenceSize(const opengm::functions::learnable::LUnary<T, I, L>&);
-   template<class INDEX_OUTPUT_ITERATOR, class VALUE_OUTPUT_ITERATOR>
-      static void serialize(const opengm::functions::learnable::LUnary<T, I, L>&, INDEX_OUTPUT_ITERATOR, VALUE_OUTPUT_ITERATOR);
-   template<class INDEX_INPUT_ITERATOR, class VALUE_INPUT_ITERATOR>
-      static void deserialize( INDEX_INPUT_ITERATOR, VALUE_INPUT_ITERATOR, opengm::functions::learnable::LUnary<T, I, L>&);
-};
-
-template<class T, class I, class L>
-struct FunctionRegistration<opengm::functions::learnable::LUnary<T, I, L> > {
-   enum ID {
-      Id = opengm::FUNCTION_TYPE_ID_OFFSET + 100 + 66
-   };
-};
-
-template<class T, class I, class L>
-inline size_t
-FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> >::indexSequenceSize
-(
-   const opengm::functions::learnable::LUnary<T, I, L> & src
-) {
-
-    size_t size = 0;
-    size += 1; // numberOfLabels
-    size += 1; // numberOfWeights
-    size += 1; // numberOfFeatures
-
-    size += 3*src.shape(0);         // offsets serialization 
-    size += src.weightIds_.size();  // weight id serialization
-
-    return size;
-}
-
-template<class T, class I, class L>
-inline size_t
-FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> >::valueSequenceSize
-(
-   const opengm::functions::learnable::LUnary<T, I, L> & src
-) {
-  return src.features_.size(); // feature serialization
-}
-
-template<class T, class I, class L>
-template<class INDEX_OUTPUT_ITERATOR, class VALUE_OUTPUT_ITERATOR >
-inline void
-FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> >::serialize
-(
-    const opengm::functions::learnable::LUnary<T, I, L> & src,
-    INDEX_OUTPUT_ITERATOR indexOutIterator,
-    VALUE_OUTPUT_ITERATOR valueOutIterator
-) {
-
-    ///////////////////////////////////////
-    /// INDEX SERIALIZATION
-    ////////////////////////////////////////
-    // number of labels
-    *indexOutIterator = src.shape(0);
-    ++indexOutIterator; 
-
-    // number of weights
-    *indexOutIterator = src.weightIds_.size();
-    ++indexOutIterator; 
-    
-    // number of features
-    *indexOutIterator = src.features_.size();
-    ++indexOutIterator; 
-
-    // offset serialization
-    for(size_t i=0; i<src.offsets_.size(); ++i){
-        *indexOutIterator = src.offsets_[i];
-        ++indexOutIterator;
-    }
-
-    // weight id serialization
-    for(size_t i=0; i<src.weightIds_.size(); ++i){
-        *indexOutIterator = src.weightIds_[i];
-        ++indexOutIterator;
-    }
-
-    ///////////////////////////////////////
-    /// VALUE SERIALIZATION
-    ////////////////////////////////////////
-    // feature serialization
-    for(size_t i=0; i<src.features_.size(); ++i){
-        *valueOutIterator = src.features_[i];
-        ++valueOutIterator;
-    }
-}
-
-template<class T, class I, class L>
-template<class INDEX_INPUT_ITERATOR, class VALUE_INPUT_ITERATOR >
-inline void
-FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> >::deserialize
-(
-   INDEX_INPUT_ITERATOR indexInIterator,
-   VALUE_INPUT_ITERATOR valueInIterator,
-   opengm::functions::learnable::LUnary<T, I, L> & dst
-) { 
-
-
-
-    ///////////////////////////////////////
-    /// INDEX DESERIALIZATION
-    ////////////////////////////////////////
-    // number of labels
-    dst.numberOfLabels_ = *indexInIterator;
-    ++indexInIterator;
-    // resize offset accordingly
-    dst.offsets_.resize(3 * dst.numberOfLabels_);
-
-
-    // number of weights
-    const size_t nW =*indexInIterator;
-    ++indexInIterator;
-    // resize weightIds accordingly
-    dst.weightIds_.resize(nW);
-
-    // number of features
-    const size_t nF = *indexInIterator;
-    ++indexInIterator;
-    // resize features accordingly
-    dst.features_.resize(nF);
-
-    // offset deserialization
-    for(size_t i=0; i<dst.offsets_.size(); ++i){
-        dst.offsets_[i] = *indexInIterator;
-        ++indexInIterator;
-    }
-
-    // weight id deserialization
-    for(size_t i=0; i<dst.weightIds_.size(); ++i){
-        dst.weightIds_[i] = *indexInIterator;
-        ++indexInIterator;
-    }
-
-    ///////////////////////////////////////
-    /// VALUE DESERIALIZATION
-    ////////////////////////////////////////
-    // feature deserialization
-    for(size_t i=0; i<dst.features_.size(); ++i){
-        dst.features_[i] = *valueInIterator;
-        ++valueInIterator;
-    } 
-}
-
-} // namespace opengm
-
-#endif // #ifndef OPENGM_LEARNABLE_FUNCTION_HXX
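LUnary packs a ragged per-label layout into flat vectors: offsets_ holds, for each label l, the pair count, the start of l's ids in weightIds_, and the start of l's values in features_. A Python sketch of how operator() walks that layout (a restatement of the C++ above, not the opengm API):

    def lunary_value(label, offsets, n_labels, weight_ids, features, weights):
        # offsets packs three arrays of length n_labels:
        #   offsets[0*n + l] : number of (weight, feature) pairs for label l
        #   offsets[1*n + l] : start of label l's ids in weight_ids
        #   offsets[2*n + l] : start of label l's values in features
        n, w_off, f_off = (offsets[0 * n_labels + label],
                           offsets[1 * n_labels + label],
                           offsets[2 * n_labels + label])
        return sum(weights[weight_ids[w_off + i]] * features[f_off + i]
                   for i in range(n))

    offsets    = [1, 2,  0, 1,  0, 1]   # counts | weight-id starts | feature starts
    weight_ids = [5, 3, 4]
    features   = [0.5, 1.0, 2.0]
    weights    = {5: 1.0, 3: 2.0, 4: -1.0}
    print lunary_value(1, offsets, 2, weight_ids, features, weights)   # -> 0.0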
diff --git a/include/opengm/functions/learnable/lweightedsum_of_functions.hxx b/include/opengm/functions/learnable/lweightedsum_of_functions.hxx
deleted file mode 100644
index 62314f4..0000000
--- a/include/opengm/functions/learnable/lweightedsum_of_functions.hxx
+++ /dev/null
@@ -1,264 +0,0 @@
-#pragma once
-#ifndef OPENGM_LEARNABLE_LWEIGHTEDSUM_OF_FUNCTIONS_FUNCTION_HXX
-#define OPENGM_LEARNABLE_LWEIGHTEDSUM_OF_FUNCTIONS_FUNCTION_HXX
-
-#include <algorithm>
-#include <vector>
-#include <cmath>
-
-#include "opengm/opengm.hxx"
-#include "opengm/functions/function_registration.hxx"
-#include "opengm/functions/function_properties_base.hxx"
-#include "opengm/datastructures/marray/marray.hxx"
-#include "opengm/graphicalmodel/weights.hxx"
-
-namespace opengm {
-namespace functions {
-namespace learnable {
-
-/// Learnable weighted sum of feature-functions
-///
-/// f(x) = \sum_i w(i) * feat(i)(x)
-///  - w    = parameter vector
-///  - feat = feature-function vector
-///
-///  
-/// \ingroup functions
-template<class T, class I = size_t, class L = size_t>
-class LWeightedSumOfFunctions
-   : public opengm::FunctionBase<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L>, T, I, L>
-{
-public:
-   typedef T ValueType;
-   typedef L LabelType;
-   typedef I IndexType;
- 
-   LWeightedSumOfFunctions();
-   LWeightedSumOfFunctions(const std::vector<L>& shape,
-      const opengm::learning::Weights<T>& weights,
-      const std::vector<size_t>& weightIDs,
-      const std::vector<marray::Marray<T> >& feat
-      );
- 
-   L shape(const size_t) const;
-   size_t size() const;
-   size_t dimension() const;
-   template<class ITERATOR> T operator()(ITERATOR) const;
- 
-   // parameters
-   void setWeights(const opengm::learning::Weights<T>& weights) const
-      {weights_ = &weights;}
-   size_t numberOfWeights()const
-     {return weightIDs_.size();}
-   I weightIndex(const size_t weightNumber) const
-     {return weightIDs_[weightNumber];} //dummy
-   template<class ITERATOR> 
-   T weightGradient(size_t,ITERATOR) const;
-
-protected:
-   mutable const opengm::learning::Weights<T>* weights_;
-   std::vector<L>                          shape_;
-   std::vector<size_t>                     weightIDs_;
-   std::vector<marray::Marray<T> >         feat_;
-
-   friend class opengm::FunctionSerialization<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> >;
-};
-
-
-template <class T, class I, class L>
-inline
-LWeightedSumOfFunctions<T, I, L>::LWeightedSumOfFunctions
-( 
-   const std::vector<L>&                           shape,
-   const opengm::learning::Weights<T>&             weights,
-   const std::vector<size_t>&                      weightIDs,
-   const std::vector<marray::Marray<T> >&          feat
-   )
-   :   weights_(&weights), shape_(shape), weightIDs_(weightIDs), feat_(feat)
-{
-   OPENGM_ASSERT( weightIDs_.size() == feat_.size() );
-   for(size_t i=0; i<weightIDs_.size(); ++i){
-      OPENGM_ASSERT( size() == feat_[i].size() );
-      for(size_t j=0; j<dimension(); ++j)
-          OPENGM_ASSERT( shape_[j] == feat_[i].shape(j))
-   }
-}
-
-template <class T, class I, class L>
-inline
-LWeightedSumOfFunctions<T, I, L>::LWeightedSumOfFunctions()
-   : weights_(NULL), shape_(std::vector<L>(0)), weightIDs_(std::vector<size_t>(0)), feat_(std::vector<marray::Marray<T> >(0))
-{
-   ;
-}
-
-
-template <class T, class I, class L>
-template <class ITERATOR>
-inline T
-LWeightedSumOfFunctions<T, I, L>::weightGradient
-(
-   size_t weightNumber,
-   ITERATOR begin
-) const {
-  OPENGM_ASSERT(weightNumber< numberOfWeights());
-  return feat_[weightNumber](begin);
-}
-
-template <class T, class I, class L>
-template <class ITERATOR>
-inline T
-LWeightedSumOfFunctions<T, I, L>::operator()
-(
-   ITERATOR begin
-) const {
-   T val = 0;
-   for(size_t i=0;i<numberOfWeights();++i){
-      val += weights_->getWeight(weightIDs_[i]) * weightGradient(i,begin);
-   }
-   return val;
-}
-
-
-template <class T, class I, class L>
-inline L
-LWeightedSumOfFunctions<T, I, L>::shape
-(
-   const size_t i
-) const {
-   return shape_[i];
-}
-
-template <class T, class I, class L>
-inline size_t
-LWeightedSumOfFunctions<T, I, L>::dimension() const {
-   return shape_.size();
-}
-
-template <class T, class I, class L>
-inline size_t
-LWeightedSumOfFunctions<T, I, L>::size() const {
-   size_t s = 1;
-   for(size_t i=0; i<dimension(); ++i)
-      s *=shape_[i];
-   return s;
-}
-
-} // namespace learnable
-} // namespace functions
-
-
-/// FunctionSerialization
-template<class T, class I, class L>
-class FunctionSerialization<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> > {
-public:
-   typedef typename opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L>::ValueType ValueType;
-
-   static size_t indexSequenceSize(const opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L>&);
-   static size_t valueSequenceSize(const opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L>&);
-   template<class INDEX_OUTPUT_ITERATOR, class VALUE_OUTPUT_ITERATOR>
-      static void serialize(const opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L>&, INDEX_OUTPUT_ITERATOR, VALUE_OUTPUT_ITERATOR);
-   template<class INDEX_INPUT_ITERATOR, class VALUE_INPUT_ITERATOR>
-      static void deserialize( INDEX_INPUT_ITERATOR, VALUE_INPUT_ITERATOR, opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L>&);
-};
-
-template<class T, class I, class L>
-struct FunctionRegistration<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> > {
-   enum ID {
-      Id = opengm::FUNCTION_TYPE_ID_OFFSET + 100 + 67
-   };
-};
-
-template<class T, class I, class L>
-inline size_t
-FunctionSerialization<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> >::indexSequenceSize
-(
-   const opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> & src
-) {
-   return 1+src.shape_.size()+1+src.weightIDs_.size();
-}
-
-template<class T, class I, class L>
-inline size_t
-FunctionSerialization<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> >::valueSequenceSize
-(
-   const opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> & src
-) {
-   return src.feat_.size()*src.size();
-}
-
-template<class T, class I, class L>
-template<class INDEX_OUTPUT_ITERATOR, class VALUE_OUTPUT_ITERATOR >
-inline void
-FunctionSerialization<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> >::serialize
-(
-   const opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> & src,
-   INDEX_OUTPUT_ITERATOR indexOutIterator,
-   VALUE_OUTPUT_ITERATOR valueOutIterator
-) {
-   // save shape
-   *indexOutIterator = src.shape_.size();
-   ++indexOutIterator; 
-   for(size_t i=0; i<src.shape_.size();++i){
-      *indexOutIterator = src.shape_[i];
-      ++indexOutIterator; 
-   }
-   //save parameter ids
-   *indexOutIterator = src.weightIDs_.size();
-   ++indexOutIterator; 
-   for(size_t i=0; i<src.weightIDs_.size();++i){
-      *indexOutIterator = src.weightIDs_[i];
-      ++indexOutIterator; 
-   }
-
-   OPENGM_ASSERT_OP(src.weightIDs_.size(), ==, src.feat_.size());
-
-   // save features  
-   for(size_t i=0; i<src.weightIDs_.size();++i){
-      for(size_t j=0; j<src.feat_[i].size();++j){
-         *valueOutIterator = src.feat_[i](j);
-         ++valueOutIterator;
-      }
-   }
-}
-
-template<class T, class I, class L>
-template<class INDEX_INPUT_ITERATOR, class VALUE_INPUT_ITERATOR >
-inline void
-FunctionSerialization<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> >::deserialize
-(
-   INDEX_INPUT_ITERATOR indexInIterator,
-   VALUE_INPUT_ITERATOR valueInIterator,
-   opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> & dst
-) { 
-   //read shape
-   size_t dim  = *indexInIterator;
-   size_t size = 1;
-   ++indexInIterator;
-   std::vector<L> shape(dim);
-   for(size_t i=0; i<dim;++i){
-      shape[i] = *indexInIterator;
-      size    *= *indexInIterator; 
-      ++indexInIterator;
-   }
-   //read parameter ids
-   size_t numW =*indexInIterator;
-   ++indexInIterator;
-   std::vector<size_t> parameterIDs(numW);
-   for(size_t i=0; i<numW;++i){ 
-      parameterIDs[i] = *indexInIterator;
-      ++indexInIterator;
-   }
-   //read features
-   std::vector<marray::Marray<T> > feat(numW,marray::Marray<T>(shape.begin(),shape.end()));
-   for(size_t i=0; i<numW;++i){
-      for(size_t j=0; j<size;++j){
-         feat[i](j)=*valueInIterator; ++valueInIterator;
-      }
-   }
-   dst.shape_ = shape; dst.weightIDs_ = parameterIDs; dst.feat_ = feat;
-}
-
-} // namespace opengm
-
-#endif //OPENGM_LEARNABLE_LWEIGHTEDSUM_OF_FUNCTIONS_FUNCTION_HXX
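With dense feature tables, f(x) = \sum_i w(i) * feat(i)(x) is just a weighted sum of Marrays, i.e. a contraction over the function index. A numpy sketch of a two-variable case with two feature functions:

    import numpy

    # f(x) = sum_i w(i) * feat(i)(x), each feat(i) a dense 2x2 value table
    feats = numpy.array([[[0.0, 1.0], [1.0, 0.0]],    # feature function 0
                         [[0.5, 0.5], [0.5, 0.5]]])   # feature function 1
    w = numpy.array([2.0, -1.0])
    f = numpy.tensordot(w, feats, axes=1)             # combined 2x2 value table
    print f[0, 1]   # -> 1.5  (2.0*1.0 + (-1.0)*0.5)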
diff --git a/include/opengm/functions/potts.hxx b/include/opengm/functions/potts.hxx
index cb061f9..092887f 100644
--- a/include/opengm/functions/potts.hxx
+++ b/include/opengm/functions/potts.hxx
@@ -33,7 +33,7 @@ public:
    bool operator==(const PottsFunction& ) const;
    ValueType valueEqual() const;
    ValueType valueNotEqual() const;
-   IndexType numberOfWeights() const;
+   IndexType numberOfParameters() const;
    ValueType parameter(const size_t index) const;
    ValueType& parameter(const size_t index);
 
@@ -211,7 +211,7 @@ PottsFunction<T, I, L>::operator==
 
 template<class T, class I, class L>
 inline typename PottsFunction<T, I, L>::IndexType
-PottsFunction<T, I, L>::numberOfWeights() const
+PottsFunction<T, I, L>::numberOfParameters() const
 {
    return 2;
 }
diff --git a/include/opengm/functions/readme.txt b/include/opengm/functions/readme.txt
index 6d9b9f5..c538627 100644
--- a/include/opengm/functions/readme.txt
+++ b/include/opengm/functions/readme.txt
@@ -59,9 +59,6 @@ opengm::StaticSingleSideFunction             16009
 opengm::DynamicSingleSideFunction            16010
 opengm::PottsG                               16011
 
-opengm::LPotts                               16165
-opengm::LUnary                               16166
-opengm::SumOfExperts                         16167
 
 /////////////////////////////////////////////////////////////
 4. Serialization and de-serialization
diff --git a/include/opengm/functions/unary_loss_function.hxx b/include/opengm/functions/unary_loss_function.hxx
deleted file mode 100644
index 21f5078..0000000
--- a/include/opengm/functions/unary_loss_function.hxx
+++ /dev/null
@@ -1,151 +0,0 @@
-#pragma once
-#ifndef OPENGM_UNARY_LOSS_FUNCTION
-#define OPENGM_UNARY_LOSS_FUNCTION
-
-#include "opengm/functions/function_properties_base.hxx"
-
-namespace opengm {
-
-
-
-
-
-
-
-
-
-/// \endcond
-
-/// UnaryLossFunction evaluates a per-variable loss term relative to a ground-truth label
-///
-/// \ingroup functions
-template<class T,class I, class L>
-class UnaryLossFunction
-: public FunctionBase<UnaryLossFunction<T,I,L>, T,I,L>
-{
-public:
-
-   typedef T ValueType;
-   typedef T value_type;
-   typedef I IndexType;
-   typedef L LabelType;
-
-
-   enum LossType{
-        HammingLoss = 0,
-        LabelVectorConf = 1,
-        LabelVectorGt = 2,
-        LabelMatrix = 3,
-        L1Loss = 4,
-        L2Loss = 5
-   };
-
-   struct SharedMultiplers{
-        marray::Marray<ValueType> labelMult_;
-   };
-
-
-
-
-    UnaryLossFunction(
-        const LabelType numberOfLabels,
-        const LabelType gtLabel,
-        const LossType lossType, 
-        const ValueType multiplier,
-        const SharedMultiplers & sharedMultiplers,
-        const bool owner
-    );
-    template<class Iterator> ValueType operator()(Iterator begin) const;
-    IndexType shape(const IndexType) const;
-    IndexType dimension() const;
-    IndexType size() const;
-
-private:
-   LabelType numberOfLabels_;
-   LabelType gtLabel_;
-   LossType lossType_;
-   ValueType multiplier_;
-   const SharedMultiplers * sharedMultiplers_;
-   bool owner_;
-};
-
-template<class T,class I, class L>
-inline
-UnaryLossFunction<T,I,L>::UnaryLossFunction(
-    const LabelType numberOfLabels,
-    const LabelType gtLabel,
-    const LossType lossType, 
-    const ValueType multiplier,
-    const SharedMultiplers & sharedMultiplers,
-    const bool owner
-)
-:   numberOfLabels_(numberOfLabels),
-    gtLabel_(gtLabel),
-    lossType_(lossType),
-    multiplier_(multiplier),
-    sharedMultiplers_(&sharedMultiplers),
-    owner_(owner)
-{
-
-}
-
-template<class T,class I, class L>
-template<class Iterator>
-inline typename UnaryLossFunction<T,I,L>::ValueType
-UnaryLossFunction<T,I,L>::operator()
-(
-   Iterator begin
-) const {
-
-    const LabelType l = *begin;
-    const ValueType isDifferent = (l != gtLabel_ ?  1.0 : 0.0);
-
-    switch(lossType_){
-        case HammingLoss:{
-            return static_cast<ValueType>(-1.0) * multiplier_ * isDifferent;
-        }
-        case LabelVectorConf:{
-            return multiplier_ * isDifferent * sharedMultiplers_->labelMult_(l);
-        }
-        case LabelVectorGt:{
-            return multiplier_ * isDifferent * sharedMultiplers_->labelMult_(gtLabel_);
-        }
-        case LabelMatrix:{
-            return multiplier_ * isDifferent * sharedMultiplers_->labelMult_(l, gtLabel_);
-        }
-        case L1Loss:{
-            return multiplier_ * static_cast<ValueType>(std::abs(int(l)-int(gtLabel_)));
-        }
-        case L2Loss:{
-            return multiplier_ * std::pow(int(l)-int(gtLabel_),2);
-        }
-        default :{
-            throw RuntimeError("wrong loss type");
-        }
-    }
-}
-
-template<class T,class I, class L>
-inline typename UnaryLossFunction<T,I,L>::IndexType
-UnaryLossFunction<T,I,L>::shape
-(
-   const typename UnaryLossFunction<T,I,L>::IndexType index
-) const{
-   return numberOfLabels_;
-}
-
-template<class T,class I, class L>
-inline typename UnaryLossFunction<T,I,L>::IndexType
-UnaryLossFunction<T,I,L>::dimension() const {
-   return 1;
-}
-
-template<class T,class I, class L>
-inline typename UnaryLossFunction<T,I,L>::IndexType
-UnaryLossFunction<T,I,L>::size() const {
-   return numberOfLabels_;
-}
-
-} // namespace opengm
-
-#endif // #ifndef OPENGM_UNARY_LOSS_FUNCTION
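The scalar loss types reduce to simple formulas in the ground-truth label; the vector and matrix variants additionally scale by a shared per-label multiplier. A Python sketch of the scalar cases from the switch above:

    def unary_loss(l, gt, loss_type, m=1.0):
        # mirrors the scalar branches of UnaryLossFunction::operator()
        if loss_type == 'hamming':
            return -m * (1.0 if l != gt else 0.0)
        if loss_type == 'l1':
            return m * abs(l - gt)
        if loss_type == 'l2':
            return m * (l - gt) ** 2
        raise ValueError('wrong loss type')

    print unary_loss(2, 0, 'l2')   # -> 4.0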
diff --git a/include/opengm/functions/view_convert_function.hxx b/include/opengm/functions/view_convert_function.hxx
index 1cfad00..26ced72 100644
--- a/include/opengm/functions/view_convert_function.hxx
+++ b/include/opengm/functions/view_convert_function.hxx
@@ -18,10 +18,7 @@ namespace detail_convert_function {
       template<class T>
          static ProbabilityType convert(const T x)
             { return static_cast<ProbabilityType>(x); }
-      template<class T>
-         static ProbabilityType convert(const T x, const T invT)
-            { return static_cast<ProbabilityType>(x); }
-   }; 
+   };
 
    template<class PROBABILITY>
    struct ValueToProbability<Multiplier, Minimizer, PROBABILITY>
@@ -29,10 +26,7 @@ namespace detail_convert_function {
       typedef PROBABILITY ProbabilityType;
       template<class T>
          static ProbabilityType convert(const T x)
-            { return static_cast<ProbabilityType>(1) / static_cast<ProbabilityType>(x); } // is this correct ?!?
-      template<class T>
-         static ProbabilityType convert(const T x, const T invT)
-            { return static_cast<ProbabilityType>(1) / static_cast<ProbabilityType>(x); } // is this correct ?!?
+            { return static_cast<ProbabilityType>(1) / static_cast<ProbabilityType>(x); }
    };
 
    template<class PROBABILITY>
@@ -42,9 +36,6 @@ namespace detail_convert_function {
       template<class T>
          static ProbabilityType convert(const T x)
             { return static_cast<ProbabilityType>(std::exp(x)); }
-      template<class T>
-      static ProbabilityType convert(const T x, const T invT)
-            { return static_cast<ProbabilityType>(std::exp(invT * x)); }
    };
 
    template<class PROBABILITY>
@@ -53,10 +44,7 @@ namespace detail_convert_function {
       typedef PROBABILITY ProbabilityType;
       template<class T>
          static ProbabilityType convert(const T x)
-            { return static_cast<ProbabilityType>(std::exp(-x)); } 
-      template<class T>
-         static ProbabilityType convert(const T x, const T invT)
-            { return static_cast<ProbabilityType>(std::exp(-invT * x)); }
+            { return static_cast<ProbabilityType>(std::exp(-x)); }
    };
 }
 /// \endcond
@@ -79,7 +67,6 @@ public:
 
    ViewConvertFunction();
    ViewConvertFunction(const FactorType &);
-   ViewConvertFunction(const FactorType &, const ValueType);
    template<class Iterator> ValueType operator()(Iterator begin) const;
    IndexType shape(const IndexType) const;
    IndexType dimension() const;
@@ -87,13 +74,12 @@ public:
 
 private:
    FactorType const* factor_;
-   ValueType inverseTemperature_;
 };
 
 template<class GM,class ACC,class VALUE_TYPE>
 inline
 ViewConvertFunction<GM,ACC,VALUE_TYPE>::ViewConvertFunction()
-   :  factor_(NULL),inverseTemperature_(1)
+:  factor_(NULL)
 {}
 
 template<class GM,class ACC,class VALUE_TYPE>
@@ -102,17 +88,7 @@ ViewConvertFunction<GM,ACC,VALUE_TYPE>::ViewConvertFunction
 (
    const typename ViewConvertFunction<GM,ACC,VALUE_TYPE>::FactorType & factor
 )
-:  factor_(&factor),inverseTemperature_(1)
-{}
-
-template<class GM,class ACC,class VALUE_TYPE>
-inline
-ViewConvertFunction<GM,ACC,VALUE_TYPE>::ViewConvertFunction
-(
-   const typename ViewConvertFunction<GM,ACC,VALUE_TYPE>::FactorType & factor,
-   const VALUE_TYPE invT
-)
-:  factor_(&factor),inverseTemperature_(invT)
+:  factor_(&factor)
 {}
 
 template<class GM,class ACC,class VALUE_TYPE>
@@ -122,7 +98,7 @@ ViewConvertFunction<GM,ACC,VALUE_TYPE>::operator()
 (
    Iterator begin
 ) const {
-   return detail_convert_function::ValueToProbability<OperatorType,ACC,ValueType>::convert(factor_->operator()(begin),inverseTemperature_);
+   return detail_convert_function::ValueToProbability<OperatorType,ACC,ValueType>::convert(factor_->operator()(begin));
 }
 
 template<class GM,class ACC,class VALUE_TYPE>
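After the revert, ValueToProbability is again a pure function of the value: identity or reciprocal in the multiplier semi-ring, exp(x) or exp(-x) in the adder semi-ring, depending on whether the accumulator maximizes or minimizes. A compact Python restatement of the four specializations, with the semi-ring names spelled out as an assumption read off the template arguments:

    import math

    def value_to_probability(x, operator, accumulator):
        if operator == 'multiplier':
            return x if accumulator == 'maximizer' else 1.0 / x
        if operator == 'adder':
            return math.exp(x) if accumulator == 'maximizer' else math.exp(-x)
        raise ValueError('unknown semi-ring')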
diff --git a/include/opengm/graphicalmodel/graphicalmodel.hxx b/include/opengm/graphicalmodel/graphicalmodel.hxx
index 5db8227..1273e75 100755
--- a/include/opengm/graphicalmodel/graphicalmodel.hxx
+++ b/include/opengm/graphicalmodel/graphicalmodel.hxx
@@ -93,12 +93,6 @@ public:
    GraphicalModel(const SpaceType& ,const size_t reserveFactorsPerVariable=0);
    GraphicalModel& operator=(const GraphicalModel&);
 
-   template<class OTHER_TL>
-   GraphicalModel& operator=(
-        const GraphicalModel<T, OPERATOR, OTHER_TL, SPACE > & otherGM
-   );
-
-
    const SpaceType& space() const;
    IndexType numberOfVariables() const;
    IndexType numberOfVariables(const IndexType) const;
@@ -175,7 +169,7 @@ public:
    //}
 
    
-//protected:
+protected:
    template<size_t FUNCTION_INDEX>
       const std::vector<typename meta::TypeAtTypeList<FunctionTypeList, FUNCTION_INDEX>::type>& functions() const;
    template<size_t FUNCTION_INDEX>
@@ -797,42 +791,6 @@ GraphicalModel<T, OPERATOR, FUNCTION_TYPE_LIST, SPACE>::operator=
    return *this;
 }
    
-
-template<class T, class OPERATOR, class FUNCTION_TYPE_LIST, class SPACE>
-template<class OTHER_TL>
-inline GraphicalModel<T, OPERATOR, FUNCTION_TYPE_LIST, SPACE>&
-GraphicalModel<T, OPERATOR, FUNCTION_TYPE_LIST, SPACE>::operator=
-(
-   const GraphicalModel<T, OPERATOR, OTHER_TL, SPACE>& gm
-) {
-  
-    typedef GraphicalModel<T, OPERATOR, OTHER_TL, SPACE> OtherGm;
-    this->space_ = gm.space_;
-
-    //this->functionDataField_=gm.functionDataField_;
-
-    
-
-    std::vector<int> srcFidToTarget(OtherGm::NrOfFunctionTypes,-1);
-    detail_graphical_model::CopyFunctions<0, OtherGm::NrOfFunctionTypes >::op(gm, *this,srcFidToTarget);
-
-    this->factors_.resize(gm.factors_.size());
-    this->variableFactorAdjaceny_=gm.variableFactorAdjaceny_;    
-    this->factorsVis_ = gm.factorsVis_; 
-    this->order_ = gm.order_;
-
-    for(size_t i = 0; i<this->factors_.size(); ++i) {  
-        factors_[i].gm_=this;
-        factors_[i].functionIndex_=gm.factors_[i].functionIndex_;
-
-        int newFidFunctionId = srcFidToTarget[gm.factors_[i].functionTypeId_];
-        OPENGM_CHECK_OP(newFidFunctionId,>,-1,"INTERNAL ERROR");
-        factors_[i].functionTypeId_= newFidFunctionId;
-        factors_[i].vis_=gm.factors_[i].vis_;
-        factors_[i].vis_.assignPtr(this->factorsVis_);
-    }
-    return *this;
-}
    
 template<class T, class OPERATOR, class FUNCTION_TYPE_LIST, class SPACE>
 template<size_t FUNCTION_INDEX>
diff --git a/include/opengm/graphicalmodel/graphicalmodel_factor.hxx b/include/opengm/graphicalmodel/graphicalmodel_factor.hxx
index b2fa38b..519cc15 100755
--- a/include/opengm/graphicalmodel/graphicalmodel_factor.hxx
+++ b/include/opengm/graphicalmodel/graphicalmodel_factor.hxx
@@ -192,33 +192,6 @@ public:
    ValueType min() const;
    ValueType max() const;
    IndexType dimension()const{return this->numberOfVariables();}
-
-
-
-    template<class LABEL_ITER>
-    struct GmToFactorLabelIter{
-        typedef SubsetAccessor<VariablesIteratorType, LABEL_ITER> Accessor;
-        typedef AccessorIterator<Accessor, true> Iter;
-    };
-
-    template<class LABEL_ITER>    
-    typename GmToFactorLabelIter<LABEL_ITER>::Iter
-    gmToFactorLabelsBegin(LABEL_ITER gmLabelsBegin)const{
-        typedef typename GmToFactorLabelIter<LABEL_ITER>::Accessor Accessor;
-        Accessor accessor(variableIndicesBegin(),variableIndicesEnd(), gmLabelsBegin);
-        return Iter(accessor, 0);
-    }
-
-    template<class LABEL_ITER>    
-    typename GmToFactorLabelIter<LABEL_ITER>::Iter
-    gmToFactorLabelsEnd(LABEL_ITER gmLabelsBegin)const{
-        typedef typename GmToFactorLabelIter<LABEL_ITER>::Accessor Accessor;
-        typedef typename GmToFactorLabelIter<LABEL_ITER>::Iter Iter;
-        Accessor accessor(variableIndicesBegin(),variableIndicesEnd(), gmLabelsBegin);
-        return Iter(accessor, this->numberOfVariables());
-    }
-
-
 private:
    void testInvariant() const;
    //std::vector<IndexType> & variableIndexSequence();
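
The removed gmToFactorLabelsBegin/gmToFactorLabelsEnd pair exposed, through a subset accessor, the projection of a full-model labeling onto the variables of one factor. The same projection as a plain loop (gmToFactorLabels is an illustrative name, not an opengm API):

    #include <cstddef>
    #include <vector>

    std::vector<int> gmToFactorLabels(const std::vector<int>& gmLabels,
                                      const std::vector<std::size_t>& factorVars) {
        std::vector<int> local(factorVars.size());
        for (std::size_t i = 0; i < factorVars.size(); ++i)
            local[i] = gmLabels[factorVars[i]]; // label of the factor's i-th variable
        return local;
    }
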
diff --git a/include/opengm/graphicalmodel/graphicalmodel_function_wrapper.hxx b/include/opengm/graphicalmodel/graphicalmodel_function_wrapper.hxx
index 2556808..63683cb 100755
--- a/include/opengm/graphicalmodel/graphicalmodel_function_wrapper.hxx
+++ b/include/opengm/graphicalmodel/graphicalmodel_function_wrapper.hxx
@@ -39,85 +39,6 @@ template<class GRAPHICAL_MODEL> class Factor;
 
 namespace detail_graphical_model {
 
-
-    template<bool IN_LIST>
-    struct MaybeCopyFunctionVector;
-
-    template<>
-    struct MaybeCopyFunctionVector<true>{
-
-        template<class FVEC, class GM_T, class SRC_FID_TO_TARGET>
-        void static op(
-            const FVEC & functionsS,
-            GM_T & gmT,
-            SRC_FID_TO_TARGET & srcFidToTarget,
-            size_t indexInSource
-        ){
-
-          typedef typename GM_T::FunctionTypeList TargetList;
-          typedef opengm::meta::GetIndexInTypeList<TargetList,typename FVEC::value_type> IndexGetter;
-
-          srcFidToTarget[indexInSource] = IndexGetter::value;
-          gmT. template functions<IndexGetter::value>() = functionsS;
-        }
-    };
-
-    template<>
-    struct MaybeCopyFunctionVector<false>{
-
-        template<class FVEC, class GM_T, class SRC_FID_TO_TARGET>
-        void static op(
-            const FVEC & functionsS,
-            GM_T & gmT,
-            SRC_FID_TO_TARGET & srcFidToTarget,
-            size_t indexInSource
-        ){
-            srcFidToTarget[indexInSource] = -1;
-            OPENGM_CHECK_OP(functionsS.size(),==,0,"incompatible functions must have zero size");
-        }
-    };
-
-
-    template<size_t I, size_t DX>
-    struct CopyFunctions{
-
-        template<class GM_S, class GM_T, class SRC_FID_TO_TARGET>
-        void static op(
-            const GM_S & gmS,
-            GM_T & gmT,
-            SRC_FID_TO_TARGET & srcFidToTarget
-        ){
-            // 
-            typedef typename GM_S::FunctionTypeList SourceList;
-            typedef typename GM_T::FunctionTypeList TargetList;
-            typedef typename opengm::meta::TypeAtTypeList<SourceList, I>::type FType;
-
-            const std::vector<FType> & functions = gmS. template functions<I>();
-
-            typedef MaybeCopyFunctionVector<opengm::meta::HasTypeInTypeList<TargetList, FType>::value > CopyFVec;
-            CopyFVec::op(functions, gmT, srcFidToTarget, I);
-            // next function type
-            CopyFunctions<I+1, DX>::op(gmS,gmT,srcFidToTarget);
-        }
-    };
-    template<size_t DX>
-    struct CopyFunctions<DX,DX>{
-
-        template<class GM_S, class GM_T, class SRC_FID_TO_TARGET>
-        void static op(
-            const GM_S & gmS,
-            GM_T & gmT,
-            SRC_FID_TO_TARGET & srcFidToTarget
-        ){
-
-        }
-    };
-
-
-
-
-
-
    #define OPENGM_BASIC_FUNCTION_WRAPPER_CODE_GENERATOR_MACRO( RETURN_TYPE , FUNCTION_NAME ) \
    template<size_t NUMBER_OF_FUNCTIONS> \
    template<class GM> \
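
The deleted CopyFunctions/MaybeCopyFunctionVector helpers walk the function typelist at compile time, copying each function container whose type also occurs in the target list. Their recursion skeleton, stripped of the opengm meta utilities (ForEachType is an illustrative name):

    #include <cstddef>

    // Visit typelist positions I..DX-1; the I == DX specialization terminates.
    template<std::size_t I, std::size_t DX>
    struct ForEachType {
        template<class Visitor>
        static void op(Visitor& v) {
            v.template visit<I>();          // act on the type at index I
            ForEachType<I + 1, DX>::op(v);  // recurse to the next index
        }
    };

    template<std::size_t DX>
    struct ForEachType<DX, DX> {
        template<class Visitor>
        static void op(Visitor&) {}         // nothing left to visit
    };
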
diff --git a/include/opengm/graphicalmodel/parameters.hxx b/include/opengm/graphicalmodel/parameters.hxx
new file mode 100644
index 0000000..22d9aa5
--- /dev/null
+++ b/include/opengm/graphicalmodel/parameters.hxx
@@ -0,0 +1,46 @@
+#ifndef OPENGM_PARAMETERS
+#define OPENGM_PARAMETERS
+
+#include <vector>            // for the std::vector member
+#include <opengm/opengm.hxx> // for OPENGM_ASSERT_OP
+
+namespace opengm{
+
+   template<class T,class I>
+   class Parameters{
+   public:
+      typedef T ValueType;
+      typedef I IndexType;
+
+
+      Parameters(const IndexType numberOfParameters=0)
+      : params_(numberOfParameters){
+
+      }
+
+      ValueType getParameter(const size_t pi)const{
+         OPENGM_ASSERT_OP(pi,<,params_.size());
+         return params_[pi];
+      }
+
+      void setParameter(const size_t pi,const ValueType value){
+         OPENGM_ASSERT_OP(pi,<,params_.size());
+         params_[pi]=value;
+      }
+
+      ValueType operator[](const size_t pi)const{
+         return getParameter(pi);
+      }
+
+      size_t numberOfParameters()const{
+         return params_.size();
+      }
+
+   private:
+
+      std::vector<ValueType> params_;
+   };
+}
+
+
+#endif /* OPENGM_PARAMETERS */
\ No newline at end of file
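
Usage of the restored Parameters container is straightforward; a short sketch (relying on the <vector> and OPENGM_ASSERT_OP includes added above):

    #include <opengm/graphicalmodel/parameters.hxx>

    int main() {
        opengm::Parameters<double, size_t> params(3); // three parameters, initialized to zero
        params.setParameter(0, 0.5);
        params.setParameter(2, -1.0);
        return (params[0] == 0.5 && params.numberOfParameters() == 3) ? 0 : 1;
    }
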
diff --git a/include/opengm/graphicalmodel/weights.hxx b/include/opengm/graphicalmodel/weights.hxx
deleted file mode 100644
index ee3ed2a..0000000
--- a/include/opengm/graphicalmodel/weights.hxx
+++ /dev/null
@@ -1,282 +0,0 @@
-#ifndef OPENGM_LEARNING_WEIGHTS
-#define OPENGM_LEARNING_WEIGHTS
-
-#include <opengm/opengm.hxx>
-
-namespace opengm{
-namespace learning{
-
-    /*
-    template<class T>
-    class Weights {
-    public:
-
-        typedef T ValueType;
-
-        Weights(const size_t numberOfWeights=0)
-        :   weights_(numberOfWeights)
-        {
-
-        }
-
-        ValueType getWeight(const size_t pi)const{
-            OPENGM_ASSERT_OP(pi,<,weights_.size());
-            return weights_[pi];
-        }
-
-        void setWeight(const size_t pi,const ValueType value){
-            OPENGM_ASSERT_OP(pi,<,weights_.size());
-            weights_[pi] = value;
-        }
-
-        const ValueType& operator[](const size_t pi)const{
-            return weights_[pi];
-        }
-
-        ValueType& operator[](const size_t pi) {
-            return weights_[pi];
-        }
-
-        size_t numberOfWeights()const{
-            return weights_.size();
-        }
-
-        size_t size()const{
-            return weights_.size();
-        }
-
-    private:
-
-        std::vector<ValueType> weights_;
-    };
-    */
-    template<class T>
-    class Weights : public marray::Vector<T>
-    {
-    public:
-
-        typedef T ValueType;
-
-        Weights(const size_t numberOfWeights=0)
-        :   marray::Vector<T>(numberOfWeights)
-        {
-
-        }
-
-        ValueType getWeight(const size_t pi)const{
-            OPENGM_ASSERT_OP(pi,<,this->size());
-            return (*this)[pi];
-        }
-
-        void setWeight(const size_t pi,const ValueType value){
-            OPENGM_ASSERT_OP(pi,<,this->size());
-            (*this)[pi] = value;
-        }
-
-
-        size_t numberOfWeights()const{
-            return this->size();
-        }
-
-    private:
-
-        //std::vector<ValueType> weights_;
-    };
-
-
-    template<class T>
-    class WeightRegularizer{
-    public:
-        enum RegularizationType{
-            NoRegularizer=-1,
-            L1Regularizer=1,
-            L2Regularizer=2
-        };
-
-        WeightRegularizer(const int regularizationNorm, const double lambda=1.0)
-        :   regularizationType_(),
-            lambda_(lambda){
-            if(regularizationNorm==-1){
-                regularizationType_ = NoRegularizer;
-            }
-            else if(regularizationNorm==1){
-                regularizationType_ = L1Regularizer;
-            }
-            else if(regularizationNorm==2){
-                regularizationType_ = L2Regularizer;
-            }
-            else{
-                throw opengm::RuntimeError("regularizationNorm must be -1 (NONE), 1 (L1) or 2 (L2)");
-            }
-        }
-        WeightRegularizer(const RegularizationType regularizationType=L2Regularizer, const double lambda=1.0)
-        :   regularizationType_(regularizationType),
-            lambda_(lambda){
-
-        }
-
-        double lambda()const{
-            return lambda_;
-        }
-
-        RegularizationType regularizationType()const{
-            return regularizationType_;
-        }
-
-        int regularizerNorm()const{
-            return static_cast<int>(regularizationType_);
-        }
-
-        double evaluate(const Weights<T> & weights){
-            if(regularizationType_== NoRegularizer){
-                return 0.0;
-            }
-            else if(regularizationType_ == L1Regularizer){
-                double val = 0.0;
-                for(size_t wi=0; wi<weights.size(); ++wi){
-                    val += std::abs(weights[wi]);
-                }
-                return val*lambda_;
-            }
-            else { //if(regularizationType_ == L2Regularizer){
-                double val = 0.0;
-                for(size_t wi=0; wi<weights.size(); ++wi){
-                    val += std::pow(weights[wi], 2);
-                }
-                return val*lambda_;
-            }
-        }
-
-    private:
-        RegularizationType regularizationType_;
-        double lambda_;
-    };
-
-
-    template<class T>
-    class WeightConstraints{
-    public:
-
-        WeightConstraints(const size_t nWeights = 0)
-        :   wLowerBounds_(nWeights,-1.0*std::numeric_limits<T>::infinity()),
-            wUpperBounds_(nWeights, 1.0*std::numeric_limits<T>::infinity()),
-            cLowerBounds_(),
-            cUpperBounds_(),
-            cOffset_(0),
-            cStart_(),
-            cSize_(),
-            cIndices_(),
-            cCoeff_(){
-
-        }
-        template<class ITER_LB, class ITER_UB>
-        WeightConstraints(ITER_LB lbBegin, ITER_LB lbEnd, ITER_UB ubBegin)
-        :   wLowerBounds_(lbBegin,lbEnd),
-            wUpperBounds_(ubBegin, ubBegin + std::distance(lbBegin, lbEnd)),
-            cLowerBounds_(),
-            cUpperBounds_(),
-            cOffset_(0),
-            cStart_(),
-            cSize_(),
-            cIndices_(),
-            cCoeff_()
-        {
-
-        }   
-        // query
-        size_t numberOfConstraints()const{
-            return cStart_.size();
-        }
-
-        T weightLowerBound(const size_t wi)const{
-            return wLowerBounds_[wi];
-        }
-        T weightUpperBound(const size_t wi)const{
-            return wUpperBounds_[wi];
-        }
-
-        const std::vector<T> & weightLowerBounds()const{
-            return wLowerBounds_;
-        }
-        const std::vector<T> & weightUpperBounds()const{
-            return wUpperBounds_;
-        }
-
-
-        size_t constraintSize(const size_t ci)const{
-            return cSize_[ci];
-        }
-        T constraintLowerBound(const size_t ci)const{
-            return cLowerBounds_[ci];
-        }
-        T constraintUpperBound(const size_t ci)const{
-            return cUpperBounds_[ci];
-        }
-
-        const std::vector<size_t> & constraintSizes()const{
-            return cLowerBounds_;
-        }
-        const std::vector<T> & constraintLowerBounds()const{
-            return cLowerBounds_;
-        }
-        const std::vector<T> & constraintUpperBounds()const{
-            return cUpperBounds_;
-        }
-
-        //  modification
-        template<class ITER_LB>
-        void setLowerBounds(ITER_LB lbBegin, ITER_LB lbEnd){
-            wLowerBounds_.assign(lbBegin, lbEnd);
-        }
-
-        template<class ITER_UB>
-        void setUpperBounds(ITER_UB ubBegin, ITER_UB ubEnd){
-            wUpperBounds_.assign(ubBegin, ubEnd);
-        }
-
-        template<class ITER_INDICES, class ITER_COEFF>
-        void addConstraint(ITER_INDICES indicesBegin, ITER_INDICES indicesEnd, ITER_COEFF coeffBegin, const T lowerBound, const T upperBound){
-            // length of this constraint
-            const size_t cSize = std::distance(indicesBegin, indicesEnd);
-            // store length of constraint
-            cSize_.push_back(cSize);
-
-            // store offset / index in 'cIndices_' and 'cCoeff_'
-            cStart_.push_back(cOffset_);
-
-            // increment the cOffset_ for the next constraint which
-            // could be added by the user
-            cOffset_ +=cSize;
-
-            // copy indices and coefficients
-            for( ;indicesBegin!=indicesEnd; ++indicesBegin,++coeffBegin){
-                cIndices_.push_back(*indicesBegin);
-                cCoeff_.push_back(*coeffBegin);
-            }
-        }
-
-    private:
-        // w upper-lower bound
-        std::vector<T> wLowerBounds_;
-        std::vector<T> wUpperBounds_;
-        // constraints 
-        std::vector<T> cLowerBounds_;
-        std::vector<T> cUpperBounds_;
-
-        size_t cOffset_;
-        std::vector<size_t> cStart_;
-        std::vector<size_t> cSize_;
-        std::vector<size_t> cIndices_;
-        std::vector<T>      cCoeff_;
-    };
-
-
-} // namespace learning
-} // namespace opengm
-
-
-
-
-
-
-#endif /* OPENGM_LEARNING_WEIGHTS */
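
Among the removed learning utilities, WeightRegularizer::evaluate computes lambda * sum_i |w_i| for the L1 norm and lambda * sum_i w_i^2 for L2. The same computation as a free function over plain vectors (regularize is an illustrative name):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // norm: -1 = none, 1 = L1, 2 = L2, mirroring RegularizationType above.
    double regularize(const std::vector<double>& w, int norm, double lambda) {
        if (norm == -1) return 0.0;
        double val = 0.0;
        for (std::size_t i = 0; i < w.size(); ++i)
            val += (norm == 1) ? std::fabs(w[i]) : w[i] * w[i];
        return lambda * val;
    }
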
diff --git a/include/opengm/inference/alphabetaswap.hxx b/include/opengm/inference/alphabetaswap.hxx
index 9a21537..c32461b 100644
--- a/include/opengm/inference/alphabetaswap.hxx
+++ b/include/opengm/inference/alphabetaswap.hxx
@@ -22,33 +22,14 @@ public:
    typedef opengm::visitors::EmptyVisitor<AlphaBetaSwap<GM,INF> >   EmptyVisitorType;
    typedef opengm::visitors::TimingVisitor<AlphaBetaSwap<GM,INF> >  TimingVisitorType;
 
+   struct Parameter {
+      Parameter() {
+         maxNumberOfIterations_ = 1000;
+      }
 
-    template<class _GM>
-    struct RebindGm{
-        typedef typename INF:: template RebindGm<_GM>::type RebindedInf;
-        typedef AlphaBetaSwap<_GM, RebindedInf> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef typename INF:: template RebindGmAndAcc<_GM,_ACC>::type RebindedInf;
-        typedef AlphaBetaSwap<_GM, RebindedInf> type;
-    };
-
-
-    struct Parameter {
-        Parameter() {
-            maxNumberOfIterations_ = 1000;
-        }
-        template<class P>
-        Parameter(const P & p)
-        :   parameter_(p.parameter_),
-            maxNumberOfIterations_(maxNumberOfIterations_){
-        }
-
-        typename InferenceType::Parameter parameter_; 
-        size_t maxNumberOfIterations_; 
-    };
+      typename InferenceType::Parameter parameter_; 
+      size_t maxNumberOfIterations_; 
+   };
 
    AlphaBetaSwap(const GraphicalModelType&, Parameter = Parameter());
    std::string name() const;
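
The deleted templated Parameter constructor is the cross-model converting-constructor pattern this commit removes throughout; note that this instance initialized maxNumberOfIterations_ from itself rather than from p, leaving the field indeterminate. The pattern reduced to a standalone example, with the copy done correctly (Parameter/OtherParameter here are toy types, not the opengm ones):

    #include <cstddef>
    #include <iostream>

    struct OtherParameter { std::size_t maxNumberOfIterations_; };

    struct Parameter {
        Parameter() : maxNumberOfIterations_(1000) {}
        template<class P>   // accept any parameter type carrying the same field
        Parameter(const P& p) : maxNumberOfIterations_(p.maxNumberOfIterations_) {}
        std::size_t maxNumberOfIterations_;
    };

    int main() {
        OtherParameter o; o.maxNumberOfIterations_ = 250;
        Parameter p(o);                                // field-by-field conversion
        std::cout << p.maxNumberOfIterations_ << "\n"; // prints 250
        return 0;
    }
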
diff --git a/include/opengm/inference/alphaexpansion.hxx b/include/opengm/inference/alphaexpansion.hxx
index d5b21e3..a1b9d04 100644
--- a/include/opengm/inference/alphaexpansion.hxx
+++ b/include/opengm/inference/alphaexpansion.hxx
@@ -22,62 +22,35 @@ public:
    typedef visitors::EmptyVisitor<AlphaExpansion<GM,INF> >   EmptyVisitorType;
    typedef visitors::TimingVisitor<AlphaExpansion<GM,INF> >  TimingVisitorType;
 
-    template<class _GM>
-    struct RebindGm{
-        typedef typename INF:: template RebindGm<_GM>::type RebindedInf;
-        typedef AlphaExpansion<_GM, RebindedInf> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef typename INF:: template RebindGmAndAcc<_GM,_ACC>::type RebindedInf;
-        typedef AlphaExpansion<_GM, RebindedInf> type;
-    };
-
-    struct Parameter {
-        typedef typename InferenceType::Parameter InferenceParameter;
-        enum LabelingIntitialType {DEFAULT_LABEL, RANDOM_LABEL, LOCALOPT_LABEL, EXPLICIT_LABEL};
-        enum OrderType {DEFAULT_ORDER, RANDOM_ORDER, EXPLICIT_ORDER};
-
-        Parameter
-        (
-            const size_t maxNumberOfSteps  = 1000,
-            const InferenceParameter& para = InferenceParameter()
-        )
-        :   parameter_(para),
-            maxNumberOfSteps_(maxNumberOfSteps),
-            labelInitialType_(DEFAULT_LABEL),
-            orderType_(DEFAULT_ORDER),
-            randSeedOrder_(0),
-            randSeedLabel_(0),
-            labelOrder_(),
-            label_()
-        {}
-
-        template<class P>
-        Parameter
-        (
-            const P & p
-        )
-        :   parameter_(p.parameter_),
-            maxNumberOfSteps_(p.maxNumberOfSteps_),
-            labelInitialType_(p.labelInitialType_),
-            orderType_(p.orderType_),
-            randSeedOrder_(p.randSeedOrder_),
-            randSeedLabel_(p.randSeedLabel_),
-            labelOrder_(p.labelOrder_),
-            label_(p.labelOrder_)
-        {}
-
-        InferenceParameter parameter_;
-        size_t maxNumberOfSteps_;
-        LabelingIntitialType labelInitialType_;
-        OrderType orderType_;
-        unsigned int randSeedOrder_;
-        unsigned int randSeedLabel_;
-        std::vector<LabelType> labelOrder_;
-        std::vector<LabelType> label_;
-    };
+   struct Parameter {
+      typedef typename InferenceType::Parameter InferenceParameter;
+      enum LabelingIntitialType {DEFAULT_LABEL, RANDOM_LABEL, LOCALOPT_LABEL, EXPLICIT_LABEL};
+      enum OrderType {DEFAULT_ORDER, RANDOM_ORDER, EXPLICIT_ORDER};
+
+      Parameter
+      (
+         const size_t maxNumberOfSteps  = 1000,
+         const InferenceParameter& para = InferenceParameter()
+      )
+      :  parameter_(para),
+         maxNumberOfSteps_(maxNumberOfSteps),
+         labelInitialType_(DEFAULT_LABEL),
+         orderType_(DEFAULT_ORDER),
+         randSeedOrder_(0),
+         randSeedLabel_(0),
+         labelOrder_(),
+         label_()
+      {}
+
+      InferenceParameter parameter_;
+      size_t maxNumberOfSteps_;
+      LabelingIntitialType labelInitialType_;
+      OrderType orderType_;
+      unsigned int randSeedOrder_;
+      unsigned int randSeedLabel_;
+      std::vector<LabelType> labelOrder_;
+      std::vector<LabelType> label_;
+   };
 
    AlphaExpansion(const GraphicalModelType&, Parameter para = Parameter());
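
For orientation, the restored Parameter drives an outer loop of expansion moves: sweep over all labels, attempting an "expand to alpha" move per label, until a full sweep brings no improvement or maxNumberOfSteps_ is spent. A standalone sketch (tryExpand stands in for the graph-cut based move and is not an opengm API):

    #include <cstddef>

    template<class TryExpand>  // TryExpand(alpha) -> true if the move improved the labeling
    void alphaExpansion(std::size_t numLabels, std::size_t maxSteps, TryExpand tryExpand) {
        bool improved = true;
        for (std::size_t step = 0; improved && step < maxSteps; ++step) {
            improved = false;
            for (std::size_t alpha = 0; alpha < numLabels; ++alpha)
                if (tryExpand(alpha)) improved = true;
        }
    }
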
 
diff --git a/include/opengm/inference/alphaexpansionfusion.hxx b/include/opengm/inference/alphaexpansionfusion.hxx
index 9c26741..81f7e6a 100644
--- a/include/opengm/inference/alphaexpansionfusion.hxx
+++ b/include/opengm/inference/alphaexpansionfusion.hxx
@@ -27,57 +27,30 @@ public:
    typedef visitors::EmptyVisitor<AlphaExpansionFusion<GM,ACC> >   EmptyVisitorType;
    typedef visitors::TimingVisitor<AlphaExpansionFusion<GM,ACC> >  TimingVisitorType;
 
-    template<class _GM>
-    struct RebindGm{
-        typedef AlphaExpansionFusion<_GM, ACC> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef AlphaExpansionFusion<_GM, _ACC> type;
-    };
-
-    struct Parameter {
-        enum LabelingIntitialType {DEFAULT_LABEL, RANDOM_LABEL, 
-                                   LOCALOPT_LABEL, EXPLICIT_LABEL};
-        enum OrderType {DEFAULT_ORDER, RANDOM_ORDER, 
-                        EXPLICIT_ORDER};
-
-
-        Parameter
-        (
-            const size_t maxNumberOfSteps  = 1000
-        )
-        :   maxNumberOfSteps_(maxNumberOfSteps),
-            labelInitialType_(DEFAULT_LABEL),
-            orderType_(DEFAULT_ORDER),
-            randSeedOrder_(0),
-            randSeedLabel_(0),
-            labelOrder_(),
-            label_()
-        {}
-
-        template<class P>
-        Parameter
-        (
-            const P & p
-        )
-        :   maxNumberOfSteps_(p.maxNumberOfSteps_),
-            labelInitialType_(p.labelInitialType_),
-            orderType_(p.orderType_),
-            randSeedOrder_(p.randSeedOrder_),
-            randSeedLabel_(p.randSeedLabel_),
-            labelOrder_(p.labelOrder_),
-            label_(p.labelOrder_)
-        {}
-
-        size_t maxNumberOfSteps_;
-        LabelingIntitialType labelInitialType_;
-        OrderType orderType_;
-        unsigned int randSeedOrder_;
-        unsigned int randSeedLabel_;
-        std::vector<LabelType> labelOrder_;
-        std::vector<LabelType> label_;
+   struct Parameter {
+      enum LabelingIntitialType {DEFAULT_LABEL, RANDOM_LABEL, LOCALOPT_LABEL, EXPLICIT_LABEL};
+      enum OrderType {DEFAULT_ORDER, RANDOM_ORDER, EXPLICIT_ORDER};
+
+      Parameter
+      (
+         const size_t maxNumberOfSteps  = 1000
+      )
+      :  maxNumberOfSteps_(maxNumberOfSteps),
+         labelInitialType_(DEFAULT_LABEL),
+         orderType_(DEFAULT_ORDER),
+         randSeedOrder_(0),
+         randSeedLabel_(0),
+         labelOrder_(),
+         label_()
+      {}
+
+      size_t maxNumberOfSteps_;
+      LabelingIntitialType labelInitialType_;
+      OrderType orderType_;
+      unsigned int randSeedOrder_;
+      unsigned int randSeedLabel_;
+      std::vector<LabelType> labelOrder_;
+      std::vector<LabelType> label_;
    };
 
    AlphaExpansionFusion(const GraphicalModelType&, Parameter para = Parameter());
diff --git a/include/opengm/inference/astar.hxx b/include/opengm/inference/astar.hxx
index d98e00e..1084d1e 100644
--- a/include/opengm/inference/astar.hxx
+++ b/include/opengm/inference/astar.hxx
@@ -66,6 +66,7 @@ namespace opengm {
    public:
       ///graphical model type
       typedef GM                                          GraphicalModelType;
+      // -- obsolet --  typedef typename GraphicalModelType::template Rebind<true>::RebindType EditableGraphicalModelType;
       ///accumulation type
       typedef ACC                                         AccumulationType;
       OPENGM_GM_TYPE_TYPEDEFS;
@@ -78,81 +79,59 @@ namespace opengm {
       typedef opengm::visitors::TimingVisitor<AStar<GM, ACC> > TimingVisitorType;
       typedef opengm::visitors::EmptyVisitor<AStar<GM, ACC> > EmptyVisitorType;
       
-
-    template<class _GM>
-    struct RebindGm{
-        typedef AStar<_GM, ACC> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef AStar<_GM, _ACC > type;
-    };
-
-
-    enum Heuristic{
-        DEFAULT_HEURISTIC = 0,
-        FAST_HEURISTIC = 1,
-        STANDARD_HEURISTIC = 2
-    };
-
-    struct Parameter {
-        Parameter()
-        {
-            maxHeapSize_    = 3000000;
-            numberOfOpt_    = 1;
-            objectiveBound_ = AccumulationType::template neutral<ValueType>();
-            heuristic_      = Parameter::DEFAULTHEURISTIC;
-        };
-        
-        template<class P>
-        Parameter(const P & p )
-        :   maxHeapSize_(p.maxHeapSize_),
-            numberOfOpt_(p.numberOfOpt_),
-            objectiveBound_(p.objectiveBound_),
-            nodeOrder_(p.nodeOrder_),
-            treeFactorIds_(p.treeFactorIds_){
-        }
-
-        /// \brief add tree factor id
-        /// \param id factor id
-        void addTreeFactorId(size_t id)
-        { treeFactorIds_.push_back(id); }
-        /// DEFAULTHEURISTIC ;
-        static const size_t DEFAULTHEURISTIC = 0;
-        /// FASTHEURISTIC
-        static const size_t FASTHEURISTIC = 1;
-        /// STANDARDHEURISTIC
-        static const size_t STANDARDHEURISTIC = 2;
-        /// maxHeapSize_ maximum size of the heap
-        size_t maxHeapSize_;
-        /// number of N-best solutions that should be found
-        size_t              numberOfOpt_;
-        /// objective bound
-        ValueType          objectiveBound_;
-        /// heuristic
-        ///
-        /// DEFAULTHEURISTIC = 0;
-        /// FASTHEURISTIC = 1
-        /// STANDARDHEURISTIC = 2
-        size_t heuristic_;  
-        std::vector<IndexType> nodeOrder_;
-        std::vector<size_t> treeFactorIds_;
-
-    };
-
-    AStar(const GM& gm, Parameter para = Parameter());
-    virtual std::string name() const {return "AStar";}
-    const GraphicalModelType& graphicalModel() const;
-    virtual InferenceTermination infer();
-    virtual void reset();
-    template<class VisitorType> InferenceTermination infer(VisitorType& visitor);
-    ValueType bound()const {return belowBound_;}
-    ValueType value()const;
-    virtual InferenceTermination marginal(const size_t,IndependentFactorType& out)const        {return UNKNOWN;}
-    virtual InferenceTermination factorMarginal(const size_t, IndependentFactorType& out)const {return UNKNOWN;}
-    virtual InferenceTermination arg(std::vector<LabelType>& v, const size_t = 1)const;
-    virtual InferenceTermination args(std::vector< std::vector<LabelType> >& v)const;
+      enum Heuristic{
+         DEFAULT_HEURISTIC = 0,
+         FAST_HEURISTIC = 1,
+         STANDARD_HEURISTIC = 2
+      };
+      struct Parameter {
+         Parameter()
+            {
+               maxHeapSize_    = 3000000;
+               numberOfOpt_    = 1;
+               objectiveBound_ = AccumulationType::template neutral<ValueType>();
+               heuristic_      = Parameter::DEFAULTHEURISTIC;
+            };
+            /// constructor
+
+         /// \brief add tree factor id
+         /// \param id factor id
+         void addTreeFactorId(size_t id)
+            { treeFactorIds_.push_back(id); }
+         /// DEFAULTHEURISTIC ;
+         static const size_t DEFAULTHEURISTIC = 0;
+         /// FASTHEURISTIC
+         static const size_t FASTHEURISTIC = 1;
+         /// STANDARDHEURISTIC
+         static const size_t STANDARDHEURISTIC = 2;
+         /// maxHeapSize_ maximum size of the heap
+         size_t maxHeapSize_;
+         /// number of N-best solutions that should be found
+         size_t              numberOfOpt_;
+         /// objective bound
+         ValueType          objectiveBound_;
+         /// heuristic
+         ///
+         /// DEFAULTHEURISTIC = 0;
+         /// FASTHEURISTIC = 1
+         /// STANDARDHEURISTIC = 2
+         size_t heuristic_;  
+         std::vector<IndexType> nodeOrder_;
+         std::vector<size_t> treeFactorIds_;
+       
+      };
+      AStar(const GM& gm, Parameter para = Parameter());
+      virtual std::string name() const {return "AStar";}
+      const GraphicalModelType& graphicalModel() const;
+      virtual InferenceTermination infer();
+      virtual void reset();
+      template<class VisitorType> InferenceTermination infer(VisitorType& visitor);
+      ValueType bound()const {return belowBound_;}
+      ValueType value()const;
+      virtual InferenceTermination marginal(const size_t,IndependentFactorType& out)const        {return UNKNOWN;}
+      virtual InferenceTermination factorMarginal(const size_t, IndependentFactorType& out)const {return UNKNOWN;}
+      virtual InferenceTermination arg(std::vector<LabelType>& v, const size_t = 1)const;
+      virtual InferenceTermination args(std::vector< std::vector<LabelType> >& v)const;
 
    private:
       const GM&                                   gm_;
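
The restored AStar::Parameter tunes a best-first search: maxHeapSize_ caps memory, objectiveBound_ prunes nodes that cannot beat a known value, and heuristic_ selects the per-node bound. A much-reduced sketch of that control flow (Node, bestFirst and the Expand functor are illustrative, not the opengm internals):

    #include <cstddef>
    #include <queue>
    #include <vector>

    struct Node { double bound; };
    struct Worse { bool operator()(const Node& a, const Node& b) const { return a.bound > b.bound; } };

    template<class Expand>  // Expand(node, heap) pushes successors, returns true at a goal
    bool bestFirst(std::priority_queue<Node, std::vector<Node>, Worse>& heap,
                   double objectiveBound, std::size_t maxHeapSize, Expand expand) {
        while (!heap.empty()) {
            if (heap.size() > maxHeapSize) return false;   // memory guard
            Node best = heap.top(); heap.pop();
            if (best.bound > objectiveBound) return false; // cannot beat the bound
            if (expand(best, heap)) return true;
        }
        return false;
    }
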
diff --git a/include/opengm/inference/auxiliary/fusion_move/fusion_mover.hxx b/include/opengm/inference/auxiliary/fusion_move/fusion_mover.hxx
index 0d0f10d..ad46fd6 100644
--- a/include/opengm/inference/auxiliary/fusion_move/fusion_mover.hxx
+++ b/include/opengm/inference/auxiliary/fusion_move/fusion_mover.hxx
@@ -405,18 +405,6 @@ template<class GM, class ACC>
 class HlFusionMover{
 
 public:
-
-    template<class _GM>
-    struct RebindGm{
-        typedef HlFusionMover<_GM, ACC> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef HlFusionMover<_GM, _ACC> type;
-    };
-
-
     typedef GM GraphicalModelType;
     typedef ACC AccumulationType;
     OPENGM_GM_TYPE_TYPEDEFS;
@@ -467,21 +455,6 @@ public:
         {
 
         }
-
-        template<class P>
-        Parameter(const P & p)
-        : 
-            fusionSolver_(p.fusionSolver_),
-            maxSubgraphSize_(p.maxSubgraphSize_),
-            reducedInf_(p.reducedInf_),
-            connectedComponents_(p.connectedComponents_),
-            tentacles_(p.tentacles_),
-            fusionTimeLimit_(p.fusionTimeLimit_)
-        {
-
-        }
-
-
         FusionSolver fusionSolver_;
         size_t maxSubgraphSize_;
         bool reducedInf_;
diff --git a/include/opengm/inference/auxiliary/lp_solver/lp_solver_interface.hxx b/include/opengm/inference/auxiliary/lp_solver/lp_solver_interface.hxx
index 45e4e90..e27b290 100644
--- a/include/opengm/inference/auxiliary/lp_solver/lp_solver_interface.hxx
+++ b/include/opengm/inference/auxiliary/lp_solver/lp_solver_interface.hxx
@@ -631,14 +631,10 @@ namespace opengm {
 template <class LP_SOLVER_TYPE, class VALUE_TYPE, class INDEX_TYPE, class SOLUTION_ITERATOR_TYPE, class SOLVER_TIMING_TYPE>
 inline LPSolverInterface<LP_SOLVER_TYPE, VALUE_TYPE, INDEX_TYPE, SOLUTION_ITERATOR_TYPE, SOLVER_TIMING_TYPE>::Parameter::Parameter()
    : numberOfThreads_(LPDef::default_numberOfThreads_),
-     verbose_(LPDef::default_verbose_), 
-     cutUp_(LPDef::default_cutUp_),
-     epOpt_(LPDef::default_epOpt_), 
-     epMrk_(LPDef::default_epMrk_),
-     epRHS_(LPDef::default_epRHS_), 
-     epInt_(LPDef::default_epInt_),
-     epAGap_(LPDef::default_epAGap_), 
-     epGap_(LPDef::default_epGap_),
+     verbose_(LPDef::default_verbose_), cutUp_(LPDef::default_cutUp_),
+     epOpt_(LPDef::default_epOpt_), epMrk_(LPDef::default_epMrk_),
+     epRHS_(LPDef::default_epRHS_), epInt_(LPDef::default_epInt_),
+     epAGap_(LPDef::default_epAGap_), epGap_(LPDef::default_epGap_),
      workMem_(LPDef::default_workMem_),
      treeMemoryLimit_(LPDef::default_treeMemoryLimit_),
      timeLimit_(LPDef::default_timeLimit_),
diff --git a/include/opengm/inference/bruteforce.hxx b/include/opengm/inference/bruteforce.hxx
index 2548293..1189486 100644
--- a/include/opengm/inference/bruteforce.hxx
+++ b/include/opengm/inference/bruteforce.hxx
@@ -23,26 +23,7 @@ public:
    typedef visitors::VerboseVisitor<Bruteforce<GM,ACC> > VerboseVisitorType;
    typedef visitors::EmptyVisitor<Bruteforce<GM,ACC> >   EmptyVisitorType;
    typedef visitors::TimingVisitor<Bruteforce<GM,ACC> >  TimingVisitorType;
-
-    template<class _GM>
-    struct RebindGm{
-        typedef Bruteforce<_GM, ACC> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef Bruteforce<_GM, _ACC > type;
-    };
-
-   struct Parameter {
-        Parameter(){
-
-        }
-        template<class P>
-        Parameter(const P & p){
-
-        }
-   };
+   class Parameter {};
 
    Bruteforce(const GraphicalModelType&);
    Bruteforce(const GraphicalModelType&, const Parameter&);
diff --git a/include/opengm/inference/combilp.hxx b/include/opengm/inference/combilp.hxx
index 639dbe9..632628d 100644
--- a/include/opengm/inference/combilp.hxx
+++ b/include/opengm/inference/combilp.hxx
@@ -413,17 +413,6 @@ namespace opengm{
       typedef ACC AccumulationType;
       typedef GM GraphicalModelType;
 
-        template<class _GM>
-        struct RebindGm{
-            typedef CombiLP<_GM, ACC, LPSOLVER> type;
-        };
-
-        template<class _GM,class _ACC>
-        struct RebindGmAndAcc{
-            typedef CombiLP<_GM, _ACC, LPSOLVER> type;
-        };
-
-
       OPENGM_GM_TYPE_TYPEDEFS;
       typedef visitors::VerboseVisitor<CombiLP<GM, ACC, LPSOLVER> > VerboseVisitorType;
       typedef visitors::EmptyVisitor<CombiLP<GM, ACC, LPSOLVER> >   EmptyVisitorType;
diff --git a/include/opengm/inference/dualdecomposition/dualdecomposition_bundle.hxx b/include/opengm/inference/dualdecomposition/dualdecomposition_bundle.hxx
index 64a7dad..1bee922 100644
--- a/include/opengm/inference/dualdecomposition/dualdecomposition_bundle.hxx
+++ b/include/opengm/inference/dualdecomposition/dualdecomposition_bundle.hxx
@@ -49,20 +49,6 @@ namespace opengm {
       typedef typename DDBaseType::SubVariableType               SubVariableType;
       typedef typename DDBaseType::SubVariableListType           SubVariableListType; 
 
-
-        template<class _GM>
-        struct RebindGm{
-            typedef typename INF:: template RebindGm<_GM>::type RebindedInf;
-            typedef DualDecompositionSubGradient<_GM, RebindedInf, DUALBLOCK> type;
-        };
-
-        template<class _GM,class _ACC>
-        struct RebindGmAndAcc{
-            typedef typename INF:: template RebindGm<_GM,_ACC>::type RebindedInf;
-            typedef DualDecompositionSubGradient<_GM, RebindedInf, DUALBLOCK> type;
-        };
-
-
       class Parameter : public DualDecompositionBaseParameter{
       public: 
         /// The relative accuracy which has to be guaranteed to stop with an approximate solution (set 0 for optimality)
@@ -93,20 +79,6 @@ namespace opengm {
               noBundle_(false),
               useHeuristicStepsize_(true)
             {};
-
-        template<class P>
-        Parameter(const P & p)
-        :
-            minimalRelAccuracy_(p.minimalRelAccuracy_),
-            subPara_(subPara_),
-            relativeDualBoundPrecision_(p.relativeDualBoundPrecision_),
-            maxBundlesize_(p.maxBundlesize_),
-            activeBoundFixing_(p.activeBoundFixing_),
-            minDualWeight_(p.minDualWeight_),
-            maxDualWeight_(p.maxDualWeight_),
-            noBundle_(p.noBundle_),
-            useHeuristicStepsize_(p.useHeuristicStepsize_){
-        }
       };
 
       using  DualDecompositionBase<GmType, DualBlockType >::gm_;
diff --git a/include/opengm/inference/dualdecomposition/dualdecomposition_subgradient.hxx b/include/opengm/inference/dualdecomposition/dualdecomposition_subgradient.hxx
index ae3247d..2bee7c0 100644
--- a/include/opengm/inference/dualdecomposition/dualdecomposition_subgradient.hxx
+++ b/include/opengm/inference/dualdecomposition/dualdecomposition_subgradient.hxx
@@ -43,35 +43,14 @@ namespace opengm {
       typedef typename DDBaseType::SubVariableType               SubVariableType;
       typedef typename DDBaseType::SubVariableListType           SubVariableListType; 
 
-        template<class _GM>
-        struct RebindGm{
-            typedef typename INF:: template RebindGm<_GM>::type RebindedInf;
-            typedef DualDecompositionSubGradient<_GM, RebindedInf, DUALBLOCK> type;
-        };
-
-        template<class _GM,class _ACC>
-        struct RebindGmAndAcc{
-            typedef typename INF:: template RebindGm<_GM,_ACC>::type RebindedInf;
-            typedef DualDecompositionSubGradient<_GM, RebindedInf, DUALBLOCK> type;
-        };
-
-
-        class Parameter : public DualDecompositionBaseParameter{
-        public:
-            /// Parameter for Subproblems
-            typename InfType::Parameter subPara_;
-            bool useAdaptiveStepsize_;
-            bool useProjectedAdaptiveStepsize_;
-            Parameter() : useAdaptiveStepsize_(false), useProjectedAdaptiveStepsize_(false){};
-
-            template<class P>
-            Parameter(const P & p)
-            :   subPara_(p.subPara_),
-                useAdaptiveStepsize_(p.useAdaptiveStepsize_),
-                useProjectedAdaptiveStepsize_(p.useProjectedAdaptiveStepsize_){
-
-            }
-        };
+      class Parameter : public DualDecompositionBaseParameter{
+      public:
+         /// Parameter for Subproblems
+         typename InfType::Parameter subPara_;
+         bool useAdaptiveStepsize_;
+         bool useProjectedAdaptiveStepsize_;
+         Parameter() : useAdaptiveStepsize_(false), useProjectedAdaptiveStepsize_(false){};
+      };
 
       using  DualDecompositionBase<GmType, DualBlockType >::gm_;
       using  DualDecompositionBase<GmType, DualBlockType >::subGm_;
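
Of the flags the restored Parameter keeps, useAdaptiveStepsize_ suggests a Polyak-style adaptive step (an assumption; the exact rule lives in DualDecompositionBaseParameter, outside this diff; adaptiveStep is an illustrative name):

    // step = (primal - dual) / ||g||^2: shrink the step as the duality gap closes.
    double adaptiveStep(double primalValue, double dualValue, double gradNormSq) {
        return gradNormSq > 0.0 ? (primalValue - dualValue) / gradNormSq : 0.0;
    }
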
diff --git a/include/opengm/inference/dynamicprogramming.hxx b/include/opengm/inference/dynamicprogramming.hxx
index 436c36e..ec402cb 100644
--- a/include/opengm/inference/dynamicprogramming.hxx
+++ b/include/opengm/inference/dynamicprogramming.hxx
@@ -24,27 +24,7 @@ namespace opengm {
     typedef visitors::VerboseVisitor<DynamicProgramming<GM, ACC> > VerboseVisitorType;
     typedef visitors::EmptyVisitor<DynamicProgramming<GM, ACC> >   EmptyVisitorType;
     typedef visitors::TimingVisitor<DynamicProgramming<GM, ACC> >  TimingVisitorType;
-
-
-    template<class _GM>
-    struct RebindGm{
-        typedef DynamicProgramming<_GM, ACC> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef DynamicProgramming<_GM, _ACC > type;
-    };
-
     struct Parameter {
-        Parameter(){
-
-        }
-        template<class P>
-        Parameter(const P &p)
-        : roots_(p.roots_){
-        }
-        
       std::vector<IndexType> roots_;
     };
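
DynamicProgramming solves acyclic models exactly; roots_ selects where messages are collected. The underlying min-sum recursion, reduced to a chain for a standalone sketch (chainMinSum is an illustrative name; all unary vectors are assumed non-empty and of equal length):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    double chainMinSum(const std::vector< std::vector<double> >& unary,
                       double (*pairwise)(int, int)) {
        std::size_t L = unary[0].size();
        std::vector<double> msg(unary[0]);           // message leaving node 0
        for (std::size_t v = 1; v < unary.size(); ++v) {
            std::vector<double> next(L);
            for (std::size_t b = 0; b < L; ++b) {
                double best = msg[0] + pairwise(0, int(b));
                for (std::size_t a = 1; a < L; ++a)  // minimize over the predecessor label
                    best = std::min(best, msg[a] + pairwise(int(a), int(b)));
                next[b] = unary[v][b] + best;
            }
            msg.swap(next);
        }
        return *std::min_element(msg.begin(), msg.end()); // optimal value at the root
    }
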
 
diff --git a/include/opengm/inference/external/ad3.hxx b/include/opengm/inference/external/ad3.hxx
index 8ccaaba..42c9d2d 100644
--- a/include/opengm/inference/external/ad3.hxx
+++ b/include/opengm/inference/external/ad3.hxx
@@ -30,16 +30,6 @@ namespace opengm {
          typedef visitors::EmptyVisitor<AD3Inf<GM,ACC> >   EmptyVisitorType;
          typedef visitors::TimingVisitor<AD3Inf<GM,ACC> >  TimingVisitorType;
          
-        template<class _GM>
-        struct RebindGm{
-            typedef AD3Inf<_GM,ACC> type;
-        };
-
-        template<class _GM,class _ACC>
-        struct RebindGmAndAcc{
-            typedef AD3Inf<_GM,_ACC> type;
-        };
-
          enum SolverType{
             AD3_LP,
             AD3_ILP,
@@ -64,19 +54,6 @@ namespace opengm {
             {  
             }
 
-            template<class P>
-            Parameter(
-               const  P & p
-            ) :
-               solverType_(p.solverType_),
-               eta_(p.eta_),
-               adaptEta_(p.adaptEta_),
-               steps_(p.steps_),
-               residualThreshold_(p.residualThreshold_),
-               verbosity_(p.verbosity_)
-            {  
-            }
-
             SolverType  solverType_;
 
             double      eta_;
diff --git a/include/opengm/inference/external/daoopt.hxx b/include/opengm/inference/external/daoopt.hxx
index 9d584df..477553c 100644
--- a/include/opengm/inference/external/daoopt.hxx
+++ b/include/opengm/inference/external/daoopt.hxx
@@ -51,17 +51,6 @@ namespace opengm {
          typedef visitors::EmptyVisitor<DAOOPT<GM> >   EmptyVisitorType;
          typedef visitors::TimingVisitor<DAOOPT<GM> >  TimingVisitorType;
 
-
-        template<class _GM>
-        struct RebindGm{
-            typedef DAOOPT<_GM,ACC> type;
-        };
-
-        template<class _GM,class _ACC>
-        struct RebindGmAndAcc{
-            typedef DAOOPT<_GM,_ACC> type;
-        };
-
          ///Parameter inherits from daoopt ProgramOptions
          struct Parameter : public daoopt::ProgramOptions {
             /// \brief Constructor
@@ -79,21 +68,6 @@ namespace opengm {
                sampleRepeat = 1;
                aobbLookahead = 5;
             }
-            template<class P>
-            Parameter(const P & p) : daoopt::ProgramOptions() {
-               // set default options, this is not done for all parameters by daoopt
-               subprobOrder = p.subprobOrder;
-               ibound = p.ibound;
-               cbound = p.cbound;
-               cbound_worker = p.cbound_worker;
-               rotateLimit = p.rotateLimit;
-               order_iterations = p.order_iterations;
-               order_timelimit = p.order_timelimit;
-               threads = p.threads;
-               sampleDepth = p.sampleDepth;
-               sampleRepeat = p.sampleRepeat;
-               aobbLookahead = p.aobbLookahead;
-            }
          };
 
          // construction
diff --git a/include/opengm/inference/external/fastPD.hxx b/include/opengm/inference/external/fastPD.hxx
index f200f30..0c8abe2 100644
--- a/include/opengm/inference/external/fastPD.hxx
+++ b/include/opengm/inference/external/fastPD.hxx
@@ -35,16 +35,6 @@ namespace opengm {
          typedef visitors::EmptyVisitor<FastPD<GM> >   EmptyVisitorType;
          typedef visitors::TimingVisitor<FastPD<GM> >  TimingVisitorType;
 
-        template<class _GM>
-        struct RebindGm{
-            typedef FastPD<_GM,ACC> type;
-        };
-
-        template<class _GM,class _ACC>
-        struct RebindGmAndAcc{
-            typedef FastPD<_GM,_ACC> type;
-        };
-
          ///Parameter
          struct Parameter {
             /// \brief Constructor
@@ -52,12 +42,6 @@ namespace opengm {
             }
             /// number of iterations
             size_t numberOfIterations_;
-
-            template<class P>
-            Parameter(const P & p)
-            : numberOfIterations_(p.numberOfIterations_){
-                
-            }
          };
          // construction
          FastPD(const GraphicalModelType& gm, const Parameter& para = Parameter());
diff --git a/include/opengm/inference/external/mrflib.hxx b/include/opengm/inference/external/mrflib.hxx
index 911e985..99e2055 100644
--- a/include/opengm/inference/external/mrflib.hxx
+++ b/include/opengm/inference/external/mrflib.hxx
@@ -43,17 +43,6 @@ namespace opengm {
          typedef visitors::TimingVisitor<MRFLIB<GM> >  TimingVisitorType;
          typedef size_t VariableIndex;
          ///Parameter
-
-        template<class _GM>
-        struct RebindGm{
-            typedef MRFLIB<_GM> type;
-        };
-
-        template<class _GM,class _ACC>
-        struct RebindGmAndAcc{
-            typedef MRFLIB<_GM> type;
-        };
-
          struct Parameter {
             /// possible optimization algorithms for MRFLIB
             enum InferenceType {ICM, EXPANSION, SWAP, MAXPRODBP, TRWS, BPS};
@@ -71,10 +60,6 @@ namespace opengm {
             Parameter(const InferenceType inferenceType = ICM, const EnergyType energyType = VIEW, const size_t numberOfIterations = 1000)
                : inferenceType_(inferenceType), energyType_(energyType), numberOfIterations_(numberOfIterations), trwsTolerance_(0.0) {
             }
-            template<class P>
-            Parameter(const P & p)
-               : inferenceType_(p.inferenceType_), energyType_(p.energyType_), numberOfIterations_(p.numberOfIterations_), trwsTolerance_(p.trwsTolerance_) {
-            }
          };
          // construction
          MRFLIB(const GraphicalModelType& gm, const Parameter& para = Parameter());
diff --git a/include/opengm/inference/external/qpbo.hxx b/include/opengm/inference/external/qpbo.hxx
index ff69937..ea0b732 100644
--- a/include/opengm/inference/external/qpbo.hxx
+++ b/include/opengm/inference/external/qpbo.hxx
@@ -35,20 +35,8 @@ namespace opengm {
             TB0, TB1, TBX
          };
 
-        template<class _GM>
-        struct RebindGm{
-            typedef QPBO<_GM> type;
-        };
-
-        template<class _GM,class _ACC>
-        struct RebindGmAndAcc{
-            typedef QPBO<_GM> type;
-        };
-
-
-
-        ///Parameter for opengm::external::QPBO
-        struct Parameter {
+         ///Parameter for opengm::external::QPBO
+         struct Parameter {
            /// using probing technique
             bool useProbeing_;
             /// forcing strong persistency
@@ -59,23 +47,12 @@ namespace opengm {
             std::vector<size_t> label_;
             /// \brief constructor
 
-            template<class P>
-            Parameter(const P & p)
-            :
-                strongPersistency_(p.strongPersistency_),
-                useImproveing_ (p.useImproveing_),
-                useProbeing_ (p.useProbeing_)
-            {
-
-            }
-
-
             Parameter() {
-                strongPersistency_ = true;
-                useImproveing_ = false;
-                useProbeing_ = false;
+               strongPersistency_ = true;
+               useImproveing_ = false;
+               useProbeing_ = false;
             }
-        };
+         };
          // construction
          QPBO(const GraphicalModelType& gm, const Parameter para = Parameter());
          ~QPBO();
diff --git a/include/opengm/inference/external/trws.hxx b/include/opengm/inference/external/trws.hxx
index 3be5757..69ce500 100644
--- a/include/opengm/inference/external/trws.hxx
+++ b/include/opengm/inference/external/trws.hxx
@@ -45,22 +45,10 @@ namespace opengm {
          typedef visitors::EmptyVisitor<TRWS<GM> >   EmptyVisitorType;
          typedef visitors::TimingVisitor<TRWS<GM> >  TimingVisitorType;
          typedef size_t VariableIndex;
-
-
-        template<class _GM>
-        struct RebindGm{
-            typedef TRWS<_GM> type;
-        };
-
-        template<class _GM,class _ACC>
-        struct RebindGmAndAcc{
-            typedef TRWS<_GM> type;
-        };
-
          ///Parameter
          struct Parameter {
             /// possible energy types for TRWS
-            enum EnergyType {VIEW=0, TABLES=1, TL1=2, TL2=3/*, WEIGHTEDTABLE*/};
+            enum EnergyType {VIEW, TABLES, TL1, TL2/*, WEIGHTEDTABLE*/};
             /// number of iterations
             size_t numberOfIterations_;
             /// random starting message
@@ -78,30 +66,6 @@ namespace opengm {
             /// Calculate MinMarginals
             bool calculateMinMarginals_;
             /// \brief Constructor
-            template<class P>
-            Parameter(const P & p) 
-            :   numberOfIterations_(p.numberOfIterations_),
-                useRandomStart_(p.useRandomStart_),
-                useZeroStart_(p.useZeroStart_),
-                doBPS_(p.doBPS_),
-                energyType_(),
-                tolerance_(p.tolerance_),
-                minDualChange_(p.minDualChange_)
-            {
-               if(p.energyType_==0){
-                    energyType_ =VIEW;
-               }
-               else if(p.energyType_==1){
-                    energyType_ =TABLES;
-               }
-               else if(p.energyType_==2){
-                    energyType_ =TL1;
-               }
-               else if(p.energyType_==3){
-                    energyType_ =TL2;
-               }
-            };
-
             Parameter() {
                numberOfIterations_ = 1000;
                useRandomStart_ = false;
diff --git a/include/opengm/inference/fusion_based_inf.hxx b/include/opengm/inference/fusion_based_inf.hxx
index 3ef0c3a..d4f2a1c 100644
--- a/include/opengm/inference/fusion_based_inf.hxx
+++ b/include/opengm/inference/fusion_based_inf.hxx
@@ -121,6 +121,8 @@ namespace proposal_gen{
 
 
 
+
+
     template<class GM, class ACC>
     class UpDownGen
     {
@@ -945,17 +947,6 @@ public:
     typedef typename ProposalGen::Parameter ProposalParameter;
     typedef typename FusionMoverType::Parameter FusionParameter;
 
-    template<class _GM>
-    struct RebindGm{
-        typedef typename PROPOSAL_GEN:: template RebindGm<_GM>::type _P;
-        typedef FusionBasedInf<_GM, _P> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef typename PROPOSAL_GEN:: template RebindGmAndAcc<_GM, _ACC>::type _P;
-        typedef FusionBasedInf<_GM, _P> type;
-    };
 
 
     class Parameter
@@ -974,16 +965,6 @@ public:
         {
 
         }
-
-        template<class P>
-        Parameter(const P & p)
-        :   proposalParam_(p.proposalParam_),
-            fusionParam_(p.fusionParam_),
-            numIt_(p.numIt_),
-            numStopIt_(p.numStopIt_){
-        }
-
-
         ProposalParameter proposalParam_;
         FusionParameter fusionParam_;
         size_t numIt_;
diff --git a/include/opengm/inference/graphcut.hxx b/include/opengm/inference/graphcut.hxx
index 314babd..1a65656 100644
--- a/include/opengm/inference/graphcut.hxx
+++ b/include/opengm/inference/graphcut.hxx
@@ -17,17 +17,6 @@ namespace opengm {
 template<class GM, class ACC, class MINSTCUT>
 class GraphCut : public Inference<GM, ACC> {
 public:
-
-    template<class _GM>
-    struct RebindGm{
-        typedef GraphCut<_GM, ACC, MINSTCUT> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef GraphCut<_GM, _ACC, MINSTCUT> type;
-    };
-
    typedef ACC AccumulationType;
    typedef GM GraphicalModelType;
    OPENGM_GM_TYPE_TYPEDEFS;
@@ -35,18 +24,12 @@ public:
    typedef visitors::VerboseVisitor<GraphCut<GM, ACC, MINSTCUT> > VerboseVisitorType;
    typedef visitors::EmptyVisitor<GraphCut<GM, ACC, MINSTCUT> >   EmptyVisitorType;
    typedef visitors::TimingVisitor<GraphCut<GM, ACC, MINSTCUT> >  TimingVisitorType;
-    struct Parameter {
-        Parameter(const ValueType scale = 1)
-        :   scale_(scale) {
-        }
-      
-        template<class P>
-        Parameter(const P & p)
-        :   scale_(p.scale_){
-        }
-
-        ValueType scale_;
-    };
+   struct Parameter {
+      Parameter(const ValueType scale = 1)
+         : scale_(scale) 
+         {}
+      ValueType scale_;
+   };
 
    GraphCut(const GraphicalModelType&, const Parameter& = Parameter(), ValueType = static_cast<ValueType>(0.0));
    GraphCut(size_t numVar, std::vector<size_t> numFacDim, const Parameter& = Parameter(), ValueType = static_cast<ValueType>(0.0));
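
A plausible reading of Parameter::scale_ (an assumption, since the MINSTCUT backend is generic): min-cut implementations with integral edge capacities need real-valued energies scaled before rounding, along these lines:

    // Round value * scale to the nearest integer capacity (assumes value >= 0).
    long toCapacity(double value, double scale) {
        return static_cast<long>(value * scale + 0.5);
    }
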
diff --git a/include/opengm/inference/greedygremlin.hxx b/include/opengm/inference/greedygremlin.hxx
index a282bcd..ae679b3 100644
--- a/include/opengm/inference/greedygremlin.hxx
+++ b/include/opengm/inference/greedygremlin.hxx
@@ -44,25 +44,8 @@ namespace opengm {
       typedef visitors::EmptyVisitor<GreedyGremlin<GM, ACC> >   EmptyVisitorType;
       typedef visitors::TimingVisitor<GreedyGremlin<GM, ACC> >  TimingVisitorType;
       
-        template<class _GM>
-        struct RebindGm{
-            typedef GreedyGremlin<_GM, ACC> type;
-        };
-
-        template<class _GM,class _ACC>
-        struct RebindGmAndAcc{
-            typedef GreedyGremlin<_GM, _ACC > type;
-        };
-
-
       struct Parameter {
-            Parameter(){
-
-            }
-            template<class P>
-            Parameter(const P & p){
-                
-            }
+       
       };
       GreedyGremlin(const GM& gm, Parameter para = Parameter());
       virtual std::string name() const {return "GreedyGremlin";}
diff --git a/include/opengm/inference/hqpbo.hxx b/include/opengm/inference/hqpbo.hxx
index b67fc80..0b41d57 100644
--- a/include/opengm/inference/hqpbo.hxx
+++ b/include/opengm/inference/hqpbo.hxx
@@ -29,28 +29,7 @@ public:
    typedef visitors::TimingVisitor<HQPBO<GM,ACC> > TimingVisitorType;
    typedef visitors::EmptyVisitor<HQPBO<GM,ACC> > EmptyVisitorType;
 
-
-    template<class _GM>
-    struct RebindGm{
-        typedef HQPBO<_GM, ACC> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef HQPBO<_GM, _ACC > type;
-    };
-
-
-
-    struct Parameter {
-        Parameter(){
-
-        }
-        template<class P>
-        Parameter(const P & p){
-            
-        }
-     };
+   struct Parameter {};
 
    HQPBO(const GraphicalModelType&, Parameter = Parameter());
    std::string name() const;
diff --git a/include/opengm/inference/icm.hxx b/include/opengm/inference/icm.hxx
index 7b70ac9..136e466 100644
--- a/include/opengm/inference/icm.hxx
+++ b/include/opengm/inference/icm.hxx
@@ -36,44 +36,33 @@ public:
    typedef opengm::visitors::EmptyVisitor<ICM<GM,ACC> >  EmptyVisitorType;
    typedef opengm::visitors::TimingVisitor<ICM<GM,ACC> > TimingVisitorType;
 
-    template<class _GM>
-    struct RebindGm{
-        typedef ICM<_GM, ACC> type;
-    };
+   class Parameter {
+   public:
+      Parameter(
+         const std::vector<LabelType>& startPoint
+      )
+      :  moveType_(SINGLE_VARIABLE),
+         startPoint_(startPoint) 
+         {}
 
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef ICM<_GM, _ACC> type;
-    };
-
-
-    class Parameter {
-    public:
-       
-       Parameter(const std::vector<LabelType>& startPoint)
-          :  moveType_(SINGLE_VARIABLE),
-             startPoint_(startPoint) 
-          {}
-       Parameter(MoveType moveType, const std::vector<LabelType>& startPoint)
-          :  moveType_(moveType),
-             startPoint_(startPoint) 
-          {}
-
-       Parameter(MoveType moveType = SINGLE_VARIABLE)
-          :  moveType_(moveType),
-             startPoint_() 
-          {}
-
-       template<class OP>
-       Parameter(const OP & otherParameter)
-          {
-             moveType_ = otherParameter.moveType_== 0? SINGLE_VARIABLE : FACTOR; 
-             startPoint_(otherParameter.startPoint_); 
-          }
-
-        MoveType moveType_;
-        std::vector<LabelType>  startPoint_;
-    };
+      Parameter(
+         MoveType moveType, 
+         const std::vector<LabelType>& startPoint 
+      )
+      :  moveType_(moveType),
+         startPoint_(startPoint) 
+         {}
+      
+      Parameter(
+         MoveType moveType = SINGLE_VARIABLE
+      )
+      :  moveType_(moveType),
+         startPoint_() 
+      {}
+      
+      MoveType moveType_;
+      std::vector<LabelType>  startPoint_;
+   };
 
    ICM(const GraphicalModelType&);
    ICM(const GraphicalModelType&, const Parameter&);
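
One ICM sweep in SINGLE_VARIABLE mode, as a standalone sketch (localEnergy stands in for evaluating the factors adjacent to one variable; it is not an opengm API):

    #include <cstddef>
    #include <vector>

    template<class LocalEnergy>  // LocalEnergy(variable, label) -> energy contribution
    void icmPass(std::vector<int>& labels, int numLabels, LocalEnergy localEnergy) {
        for (std::size_t v = 0; v < labels.size(); ++v) {
            int best = labels[v];
            double bestE = localEnergy(v, best);
            for (int l = 0; l < numLabels; ++l)      // greedy per-variable move
                if (localEnergy(v, l) < bestE) { bestE = localEnergy(v, l); best = l; }
            labels[v] = best;
        }
    }
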
diff --git a/include/opengm/inference/infandflip.hxx b/include/opengm/inference/infandflip.hxx
index 09820c6..dede301 100644
--- a/include/opengm/inference/infandflip.hxx
+++ b/include/opengm/inference/infandflip.hxx
@@ -33,22 +33,6 @@ public:
    typedef visitors::EmptyVisitor<InfAndFlip<GM, ACC, INF> >   EmptyVisitorType;
    typedef visitors::TimingVisitor<InfAndFlip<GM, ACC, INF> >  TimingVisitorType;
 
-
-
-    template<class _GM>
-    struct RebindGm{
-        typedef typename INF::template RebindGm<_GM>::type _I;
-        typedef InfAndFlip<_GM, ACC, _I> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef typename INF::template RebindGmAndAcc<_GM,_ACC>::type _I;
-        typedef InfAndFlip<_GM, _ACC, _I> type;
-    };
-
-
-
    struct Parameter
    {
       Parameter(const size_t maxSubgraphSize=2)
@@ -57,13 +41,6 @@ public:
          subPara_(),
          warmStartableInf_(false){
       }
-      template<class P>
-      Parameter(const P & p)
-      : 
-         maxSubgraphSize_(p.maxSubgraphSize_),
-         subPara_(p.subPara_),
-         warmStartableInf_(p.warmStartableInf_){
-      }
 
       size_t maxSubgraphSize_;
       typename INF::Parameter subPara_;
diff --git a/include/opengm/inference/inference.hxx b/include/opengm/inference/inference.hxx
index 2f52edc..46a774e 100644
--- a/include/opengm/inference/inference.hxx
+++ b/include/opengm/inference/inference.hxx
@@ -29,15 +29,6 @@ enum InferenceTermination {
    INFERENCE_ERROR=4
 };
 
-
-template<class INF>
-inline void infer(const typename INF::GraphicalModelType & gm, const typename INF::Parameter & param, std::vector<typename INF::LabelType> & conf){
-    INF inf(gm, param);
-    inf.infer();
-    inf.arg(conf);
-}
-
-
 /// Inference algorithm interface
 template <class GM, class ACC>
 class Inference
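
The free function deleted above bundled the standard call sequence; for reference, the same three steps spelled out (this mirrors the removed helper):

    #include <vector>

    template<class INF>
    void runInference(const typename INF::GraphicalModelType& gm,
                      const typename INF::Parameter& param,
                      std::vector<typename INF::LabelType>& labeling) {
        INF inf(gm, param);  // construct the solver on the model
        inf.infer();         // run inference
        inf.arg(labeling);   // read back the best labeling found
    }
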
diff --git a/include/opengm/inference/lazyflipper.hxx b/include/opengm/inference/lazyflipper.hxx
index 53689ab..c7929f6 100644
--- a/include/opengm/inference/lazyflipper.hxx
+++ b/include/opengm/inference/lazyflipper.hxx
@@ -117,18 +117,6 @@ private:
 template<class GM, class ACC = Minimizer>
 class LazyFlipper : public Inference<GM, ACC> {
 public:
-
-    template<class _GM>
-    struct RebindGm{
-        typedef LazyFlipper<_GM, ACC> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef LazyFlipper<_GM, _ACC> type;
-    };
-
-
    typedef ACC AccumulationType;
    typedef GM GraphicalModelType;
    OPENGM_GM_TYPE_TYPEDEFS;
@@ -157,29 +145,20 @@ public:
          const size_t maxSubgraphSize = 2,
          const Tribool inferMultilabel = Tribool::Maybe
       )
-      :  maxSubgraphSize_(maxSubgraphSize), 
+      :  maxSubgraphSize_(maxSubgraphSize),
          startingPoint_(),
          inferMultilabel_(inferMultilabel)
       {}
 
-      template<class P>
-      Parameter(
-         const P & p         
-      )
-      :  maxSubgraphSize_(p.maxSubgraphSize_),
-         startingPoint_(p.startingPoint_),
-         inferMultilabel_(p.inferMultilabel_)
-      {}
-
       size_t maxSubgraphSize_;
       std::vector<LabelType> startingPoint_;
       Tribool inferMultilabel_;
    };
 
-   //LazyFlipper(const GraphicalModelType&, const size_t = 2, const Tribool useMultilabelInference = Tribool::Maybe);
-   LazyFlipper(const GraphicalModelType& gm, Parameter param = Parameter());
-   //template<class StateIterator>
-      //LazyFlipper(const GraphicalModelType&, const size_t, StateIterator, const Tribool useMultilabelInference = Tribool::Maybe);
+   LazyFlipper(const GraphicalModelType&, const size_t = 2, const Tribool useMultilabelInference = Tribool::Maybe);
+   LazyFlipper(const GraphicalModelType& gm, typename LazyFlipper::Parameter param);
+   template<class StateIterator>
+      LazyFlipper(const GraphicalModelType&, const size_t, StateIterator, const Tribool useMultilabelInference = Tribool::Maybe);
    std::string name() const;
    const GraphicalModelType& graphicalModel() const;
    const size_t maxSubgraphSize() const;
@@ -606,37 +585,37 @@ Forest<T>::setLevelOrderSuccessor(
 
 // implementation of LazyFlipper
 
-//template<class GM, class ACC>
-//inline
-//LazyFlipper<GM, ACC>::LazyFlipper(
-//   const GraphicalModelType& gm,
-//   const size_t maxSubgraphSize,
-//   const Tribool useMultilabelInference
-//)
-//:  gm_(gm),
-//   variableAdjacency_(Adjacency(gm.numberOfVariables())),
-//   movemaker_(Movemaker<GM>(gm)),
-//   subgraphForest_(SubgraphForest()),
-//   maxSubgraphSize_(maxSubgraphSize),
-//   useMultilabelInference_(useMultilabelInference)
-//{
-//   if(gm_.numberOfVariables() == 0) {
-//      throw RuntimeError("The graphical model has no variables.");
-//   }
-//   setMaxSubgraphSize(maxSubgraphSize);
-//   // initialize activation_
-//   activation_[0].append(gm_.numberOfVariables());
-//   activation_[1].append(gm_.numberOfVariables());
-//   // initialize variableAdjacency_
-//   for(size_t j=0; j<gm_.numberOfFactors(); ++j) {
-//      const FactorType& factor = gm_[j];
-//      for(size_t m=0; m<factor.numberOfVariables(); ++m) {
-//         for(size_t n=m+1; n<factor.numberOfVariables(); ++n) {
-//            variableAdjacency_.connect(factor.variableIndex(m), factor.variableIndex(n));
-//         }
-//      }
-//   }
-//}
+template<class GM, class ACC>
+inline
+LazyFlipper<GM, ACC>::LazyFlipper(
+   const GraphicalModelType& gm,
+   const size_t maxSubgraphSize,
+   const Tribool useMultilabelInference
+)
+:  gm_(gm),
+   variableAdjacency_(Adjacency(gm.numberOfVariables())),
+   movemaker_(Movemaker<GM>(gm)),
+   subgraphForest_(SubgraphForest()),
+   maxSubgraphSize_(maxSubgraphSize),
+   useMultilabelInference_(useMultilabelInference)
+{
+   if(gm_.numberOfVariables() == 0) {
+      throw RuntimeError("The graphical model has no variables.");
+   }
+   setMaxSubgraphSize(maxSubgraphSize);
+   // initialize activation_
+   activation_[0].append(gm_.numberOfVariables());
+   activation_[1].append(gm_.numberOfVariables());
+   // initialize variableAdjacency_
+   for(size_t j=0; j<gm_.numberOfFactors(); ++j) {
+      const FactorType& factor = gm_[j];
+      for(size_t m=0; m<factor.numberOfVariables(); ++m) {
+         for(size_t n=m+1; n<factor.numberOfVariables(); ++n) {
+            variableAdjacency_.connect(factor.variableIndex(m), factor.variableIndex(n));
+         }
+      }
+   }
+}
 
 template<class GM, class ACC>
 inline
@@ -677,40 +656,40 @@ inline void
 LazyFlipper<GM, ACC>::reset()
 {}
 
-///// \todo next version: get rid of redundancy with other constructor
-//template<class GM, class ACC>
-//template<class StateIterator>
-//inline
-//LazyFlipper<GM, ACC>::LazyFlipper(
-//   const GraphicalModelType& gm,
-//   const size_t maxSubgraphSize,
-//   StateIterator it,
-//   const Tribool useMultilabelInference
-//)
-//:  gm_(gm),
-//   variableAdjacency_(Adjacency(gm_.numberOfVariables())),
-//   movemaker_(Movemaker<GM>(gm, it)),
-//   subgraphForest_(SubgraphForest()),
-//   maxSubgraphSize_(2),
-//   useMultilabelInference_(useMultilabelInference)
-//{
-//   if(gm_.numberOfVariables() == 0) {
-//      throw RuntimeError("The graphical model has no variables.");
-//   }
-//   setMaxSubgraphSize(maxSubgraphSize);
-//   // initialize activation_
-//   activation_[0].append(gm_.numberOfVariables());
-//   activation_[1].append(gm_.numberOfVariables());
-//   // initialize variableAdjacency_
-//   for(size_t j=0; j<gm_.numberOfFactors(); ++j) {
-//      const FactorType& factor = gm_[j];
-//      for(size_t m=0; m<factor.numberOfVariables(); ++m) {
-//         for(size_t n=m+1; n<factor.numberOfVariables(); ++n) {
-//            variableAdjacency_.connect(factor.variableIndex(m), factor.variableIndex(n));
-//         }
-//      }
-//   }
-//}
+/// \todo next version: get rid of redundancy with other constructor
+template<class GM, class ACC>
+template<class StateIterator>
+inline
+LazyFlipper<GM, ACC>::LazyFlipper(
+   const GraphicalModelType& gm,
+   const size_t maxSubgraphSize,
+   StateIterator it,
+   const Tribool useMultilabelInference
+)
+:  gm_(gm),
+   variableAdjacency_(Adjacency(gm_.numberOfVariables())),
+   movemaker_(Movemaker<GM>(gm, it)),
+   subgraphForest_(SubgraphForest()),
+   maxSubgraphSize_(2),
+   useMultilabelInference_(useMultilabelInference)
+{
+   if(gm_.numberOfVariables() == 0) {
+      throw RuntimeError("The graphical model has no variables.");
+   }
+   setMaxSubgraphSize(maxSubgraphSize);
+   // initialize activation_
+   activation_[0].append(gm_.numberOfVariables());
+   activation_[1].append(gm_.numberOfVariables());
+   // initialize variableAdjacency_
+   for(size_t j=0; j<gm_.numberOfFactors(); ++j) {
+      const FactorType& factor = gm_[j];
+      for(size_t m=0; m<factor.numberOfVariables(); ++m) {
+         for(size_t n=m+1; n<factor.numberOfVariables(); ++n) {
+            variableAdjacency_.connect(factor.variableIndex(m), factor.variableIndex(n));
+         }
+      }
+   }
+}
 
 template<class GM, class ACC>
 inline void
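
With the original constructors restored, a LazyFlipper is again built directly from a model and a maximal subgraph size. A minimal usage sketch (GmType and gm are assumed to be a fully configured opengm::GraphicalModel typedef and instance, not defined in this hunk):

    opengm::LazyFlipper<GmType> flipper(gm, 3);  // flip subgraphs of size <= 3
    flipper.infer();                             // greedy lazy-flipping search
    std::vector<GmType::LabelType> labeling;
    flipper.arg(labeling);                       // read back the best labeling found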
diff --git a/include/opengm/inference/loc.hxx b/include/opengm/inference/loc.hxx
index 9ecea8d..aa7c821 100644
--- a/include/opengm/inference/loc.hxx
+++ b/include/opengm/inference/loc.hxx
@@ -81,16 +81,6 @@ public:
    typedef opengm::LPCplex<SubGmType,AccumulationType> LpCplexSubInf;
    #endif
 
-    template<class _GM>
-    struct RebindGm{
-        typedef LOC<_GM, ACC> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef LOC<_GM, _ACC > type;
-    };
-
 
    class Parameter {
    public:
@@ -125,25 +115,6 @@ public:
       {
 
       }
-
-      template<class P>
-      Parameter
-      (
-         const  P & p
-      )
-      :  solver_(p.solver_),
-         phi_(p.phi_),
-         maxBlockRadius_(p.maxBlockRadius_),
-         maxTreeRadius_(p.maxTreeRadius_),
-         pFastHeuristic_(p.pFastHeuristic_),
-         maxIterations_(p.maxIterations_),
-         stopAfterNBadIterations_(p.stopAfterNBadIterations_),
-         maxBlockSize_(p.maxBlockSize_),
-         treeRuns_(p.treeRuns_)
-      {
-
-      }
-      
       // subsolver used for submodel ("ad3" or "astar" so far)
       std::string solver_;
       /// phi of the truncated geometric distribution is used to select a certain subgraph radius with a certain probability
diff --git a/include/opengm/inference/lp_inference_base.hxx b/include/opengm/inference/lp_inference_base.hxx
index cf47706..36fc750 100644
--- a/include/opengm/inference/lp_inference_base.hxx
+++ b/include/opengm/inference/lp_inference_base.hxx
@@ -1842,6 +1842,7 @@ inline LPInferenceBase<LP_INFERENCE_TYPE>::LPInferenceBase(const GraphicalModelT
    if(!opengm::meta::Compare<OperatorType, opengm::Adder>::value) {
       throw RuntimeError("This implementation does only supports Min-Sum-Semiring and Max-Sum-Semiring.");
    }
+
    // sort factors
    sortFactors();
 
diff --git a/include/opengm/inference/lpcplex.hxx b/include/opengm/inference/lpcplex.hxx
index 6508354..28fafcf 100644
--- a/include/opengm/inference/lpcplex.hxx
+++ b/include/opengm/inference/lpcplex.hxx
@@ -45,16 +45,6 @@ public:
    typedef visitors::EmptyVisitor<LPCplex<GM,ACC> >   EmptyVisitorType;
    typedef visitors::TimingVisitor<LPCplex<GM,ACC> >  TimingVisitorType;
 
-    template<class _GM>
-    struct RebindGm{
-        typedef LPCplex<_GM, ACC> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef LPCplex<_GM, _ACC > type;
-    };
-
 
 //   enum LP_SOLVER {LP_SOLVER_AUTO,  LP_SOLVER_PRIMAL_SIMPLEX,  LP_SOLVER_DUAL_SIMPLEX,  LP_SOLVER_NETWORK_SIMPLEX,  LP_SOLVER_BARRIER,  LP_SOLVER_SIFTING,  LP_SOLVER_CONCURRENT};
 //   enum LP_PRESOLVE{LP_PRESOLVE_AUTO, LP_PRESOLVE_OFF,  LP_PRESOLVE_CONSEVATIVE,  LP_PRESOLVE_AGRESSIVE}; 
@@ -62,9 +52,9 @@ public:
  
    class Parameter {
    public:
-      bool   integerConstraint_; // ILP=true, first-order LP=false
-      int    numberOfThreads_;    // number of threads (0=autoselect)
-      bool   verbose_;           // switch verbose mode on/off 
+      bool integerConstraint_; // ILP=true, first-order LP=false
+      int numberOfThreads_;    // number of threads (0=autoselect)
+      bool verbose_;           // switch verbose mode on/off 
       double cutUp_;           // upper cutoff
       double epOpt_;           // Optimality tolerance  
       double epMrk_;           // Markowitz tolerance 
@@ -75,62 +65,25 @@ public:
      double workMem_;         // maximal amount of memory in MB used for the workspace
      double treeMemoryLimit_; // maximal amount of memory in MB used for the tree
       double timeLimit_;       // maximal time in seconds the solver has
-      int    probeingLevel_;
+      int probeingLevel_;
       //int coverCutLevel_;
       //int disjunctiverCutLevel_;
       //int cliqueCutLevel_;
       //int MIRCutLevel_;
-      LP_SOLVER    rootAlg_;
-      LP_SOLVER    nodeAlg_;
-      LP_PRESOLVE  presolve_;
+      LP_SOLVER rootAlg_;
+      LP_SOLVER nodeAlg_;
+      LP_PRESOLVE presolve_;
       MIP_EMPHASIS mipEmphasis_;
-      MIP_CUT      cutLevel_;       // Determines whether or not to generate cuts for the problem and how aggressively (will be overruled by the specific ones below). 
-      MIP_CUT      cliqueCutLevel_; // Determines whether or not to generate clique cuts for the problem and how aggressively. 
-      MIP_CUT      coverCutLevel_;  // Determines whether or not to generate cover cuts for the problem and how aggressively. 
-      MIP_CUT      gubCutLevel_;    // Determines whether or not to generate generalized upper bound (GUB) cuts for the problem and how aggressively. 
-      MIP_CUT      mirCutLevel_;    // Determines whether or not mixed integer rounding (MIR) cuts should be generated for the problem and how aggressively.  
-      MIP_CUT      iboundCutLevel_; // Determines whether or not to generate implied bound cuts for the problem and how aggressively.
-      MIP_CUT      flowcoverCutLevel_; //Determines whether or not to generate flow cover cuts for the problem and how aggressively. 
-      MIP_CUT      flowpathCutLevel_; //Determines whether or not to generate flow path cuts for the problem and how aggressively.
-      MIP_CUT      disjunctCutLevel_; // Determines whether or not to generate disjunctive cuts for the problem and how aggressively.
-      MIP_CUT      gomoryCutLevel_; // Determines whether or not to generate gomory fractional cuts for the problem and how aggressively. 
-
-      template<class P>
-      Parameter(
-        const P & p
-      ):
-
-        integerConstraint_(p.integerConstraint_),
-        numberOfThreads_(p.numberOfThreads_),
-        verbose_(p.verbose_),
-        cutUp_(p.cutUp_),
-        epOpt_(p.epOpt_),
-        epMrk_(p.epMrk_),
-        epRHS_(p.epRHS_),
-        epInt_(p.epInt_),
-        epAGap_(p.epAGap_),
-        epGap_(p.epGap_),
-        workMem_(p.workMem_),
-        treeMemoryLimit_(p.treeMemoryLimit_),
-        timeLimit_(p.timeLimit_),
-        probeingLevel_(p.probeingLevel_),
-        rootAlg_(p.rootAlg_),
-        nodeAlg_(p.nodeAlg_),
-        presolve_(p.presolve_),
-        mipEmphasis_(p.mipEmphasis_),
-        cutLevel_(p.cutLevel_),
-        cliqueCutLevel_(p.cliqueCutLevel_),
-        coverCutLevel_(p.coverCutLevel_),
-        gubCutLevel_(p.gubCutLevel_),
-        mirCutLevel_(p.mirCutLevel_),
-        iboundCutLevel_(p.iboundCutLevel_),
-        flowcoverCutLevel_(p.flowcoverCutLevel_),
-        flowpathCutLevel_(p.flowpathCutLevel_),
-        disjunctCutLevel_(p.disjunctCutLevel_),
-        gomoryCutLevel_(p.gomoryCutLevel_)
-      {
-
-      }
+      MIP_CUT cutLevel_;       // Determines whether or not to generate cuts for the problem and how aggressively (will be overruled by the specific ones below). 
+      MIP_CUT cliqueCutLevel_; // Determines whether or not to generate clique cuts for the problem and how aggressively. 
+      MIP_CUT coverCutLevel_;  // Determines whether or not to generate cover cuts for the problem and how aggressively. 
+      MIP_CUT gubCutLevel_;    // Determines whether or not to generate generalized upper bound (GUB) cuts for the problem and how aggressively. 
+      MIP_CUT mirCutLevel_;    // Determines whether or not mixed integer rounding (MIR) cuts should be generated for the problem and how aggressively.  
+      MIP_CUT iboundCutLevel_; // Determines whether or not to generate implied bound cuts for the problem and how aggressively.
+      MIP_CUT flowcoverCutLevel_; //Determines whether or not to generate flow cover cuts for the problem and how aggressively. 
+      MIP_CUT flowpathCutLevel_; //Determines whether or not to generate flow path cuts for the problem and how aggressively.
+      MIP_CUT disjunctCutLevel_; // Determines whether or not to generate disjunctive cuts for the problem and how aggressively.
+      MIP_CUT gomoryCutLevel_; // Determines whether or not to generate gomory fractional cuts for the problem and how aggressively. 
 
       /// constructor
       /// \param cutUp upper cutoff - assume that: min_x f(x) <= cutUp 
@@ -165,9 +118,6 @@ public:
          disjunctCutLevel_(MIP_CUT_AUTO), 
          gomoryCutLevel_(MIP_CUT_AUTO)
          {
-
-
-            
             numberOfThreads_   = numberOfThreads; 
             integerConstraint_ = false; 
             LPDef lpdef;
@@ -485,7 +435,7 @@ LPCplex<GM, ACC>::infer
          break; 
       }
 
-      // MIP EMPHASIS
+      // MIP EMPHASIS 
       switch(parameter_.mipEmphasis_) {
       case MIP_EMPHASIS_BALANCED:
          cplex_.setParam(IloCplex::MIPEmphasis, 0);
@@ -506,11 +456,11 @@ LPCplex<GM, ACC>::infer
 
       // verbose options
       if(parameter_.verbose_ == false) {
-       cplex_.setParam(IloCplex::MIPDisplay, 0);
-       cplex_.setParam(IloCplex::BarDisplay, 0);
-       cplex_.setParam(IloCplex::SimDisplay, 0);
-       cplex_.setParam(IloCplex::NetDisplay, 0);
-       cplex_.setParam(IloCplex::SiftDisplay, 0);
+	cplex_.setParam(IloCplex::MIPDisplay, 0);
+        cplex_.setParam(IloCplex::BarDisplay, 0);
+	cplex_.setParam(IloCplex::SimDisplay, 0);
+        cplex_.setParam(IloCplex::NetDisplay, 0);
+	cplex_.setParam(IloCplex::SiftDisplay, 0);
       } 
          
      // tolerance settings
@@ -558,13 +508,12 @@ LPCplex<GM, ACC>::infer
       //cplex_.setParam(IloCplex::MIRCuts, parameter_.MIRCutLevel_);
   
       // solve problem
-
       if(!cplex_.solve()) {
          std::cout << "failed to optimize. " <<cplex_.getStatus() << std::endl;
          inferenceStarted_ = 0;
          return UNKNOWN;
       } 
-      cplex_.getValues(sol_, x_);
+      cplex_.getValues(sol_, x_);  
    }
    catch(IloCplex::Exception e) {
       std::cout << "caught CPLEX exception: " << e << std::endl;
diff --git a/include/opengm/inference/lpcplex2.hxx b/include/opengm/inference/lpcplex2.hxx
index b2a5d20..8769055 100644
--- a/include/opengm/inference/lpcplex2.hxx
+++ b/include/opengm/inference/lpcplex2.hxx
@@ -25,16 +25,6 @@ public:
 
    // public member functions
    virtual std::string name() const;
-
-   template<class _GM>
-   struct RebindGm{
-       typedef LPCplex2<_GM, ACC_TYPE> type;
-   };
-
-   template<class _GM,class _ACC>
-   struct RebindGmAndAcc{
-       typedef LPCplex2<_GM, _ACC > type;
-   };
 };
 
 template<class GM_TYPE, class ACC_TYPE>
diff --git a/include/opengm/inference/lpgurobi.hxx b/include/opengm/inference/lpgurobi.hxx
index 8160aac..1eab43f 100644
--- a/include/opengm/inference/lpgurobi.hxx
+++ b/include/opengm/inference/lpgurobi.hxx
@@ -46,21 +46,11 @@ public:
    typedef visitors::EmptyVisitor< LPGurobi<GM, ACC> > EmptyVisitorType;
  
  
-    template<class _GM>
-    struct RebindGm{
-        typedef LPGurobi<_GM, ACC> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef LPGurobi<_GM, _ACC > type;
-    };
-
    class Parameter {
    public:
-      bool   integerConstraint_; // ILP=true, first-order LP=false
-      int    numberOfThreads_;    // number of threads (0=autoselect)
-      bool   verbose_;           // switch verbose mode on/off 
+      bool integerConstraint_; // ILP=true, first-order LP=false
+      int numberOfThreads_;    // number of threads (0=autoselect)
+      bool verbose_;           // switch verbose mode on/off 
       double cutUp_;           // upper cutoff
       double epOpt_;           // Optimality tolerance  
       double epMrk_;           // Markowitz tolerance 
@@ -71,62 +61,26 @@ public:
      double workMem_;         // maximal amount of memory in MB used for the workspace
      double treeMemoryLimit_; // maximal amount of memory in MB used for the tree
       double timeLimit_;       // maximal time in seconds the solver has
-      int    probeingLevel_;
+      int probeingLevel_;
       //int coverCutLevel_;
       //int disjunctiverCutLevel_;
       //int cliqueCutLevel_;
       //int MIRCutLevel_;
       //int presolveLevel_;
-      LP_SOLVER    rootAlg_;  
-      LP_SOLVER    nodeAlg_;
+      LP_SOLVER rootAlg_;  
+      LP_SOLVER nodeAlg_;
       MIP_EMPHASIS mipFocus_;
-      LP_PRESOLVE  presolve_;
-      MIP_CUT      cutLevel_;       // Determines whether or not to generate cuts for the problem and how aggressively (will be overruled by the specific ones below). 
-      MIP_CUT      cliqueCutLevel_; // Determines whether or not to generate clique cuts for the problem and how aggressively. 
-      MIP_CUT      coverCutLevel_;  // Determines whether or not to generate cover cuts for the problem and how aggressively. 
-      MIP_CUT      gubCutLevel_;    // Determines whether or not to generate generalized upper bound (GUB) cuts for the problem and how aggressively. 
-      MIP_CUT      mirCutLevel_;    // Determines whether or not mixed integer rounding (MIR) cuts should be generated for the problem and how aggressively.  
-      MIP_CUT      iboundCutLevel_; // Determines whether or not to generate implied bound cuts for the problem and how aggressively.
-      MIP_CUT      flowcoverCutLevel_; //Determines whether or not to generate flow cover cuts for the problem and how aggressively. 
-      MIP_CUT      flowpathCutLevel_; //Determines whether or not to generate flow path cuts for the problem and how aggressively.
-      MIP_CUT      disjunctCutLevel_; // Determines whether or not to generate disjunctive cuts for the problem and how aggressively.
-      MIP_CUT      gomoryCutLevel_; // Determines whether or not to generate gomory fractional cuts for the problem and how aggressively. 
-
-
-      template<class P>
-      Parameter(const P & p )
-      :
-        integerConstraint_(p.integerConstraint_),
-        numberOfThreads_(p.numberOfThreads_),
-        verbose_(p.verbose_),
-        cutUp_(p.cutUp_),
-        epOpt_(p.epOpt_),
-        epMrk_(p.epMrk_),
-        epRHS_(p.epRHS_),
-        epInt_(p.epInt_),
-        epAGap_(p.epAGap_),
-        epGap_(p.epGap_),
-        workMem_(p.workMem_),
-        treeMemoryLimit_(p.treeMemoryLimit_),
-        timeLimit_(p.timeLimit_),
-        probeingLevel_(p.probeingLevel_),
-        rootAlg_(p.rootAlg_),
-        nodeAlg_(p.nodeAlg_),
-        mipFocus_(p.mipFocus_),
-        presolve_(p.presolve_),
-        cutLevel_(p.cutLevel_),
-        cliqueCutLevel_(p.cliqueCutLevel_),
-        coverCutLevel_(p.coverCutLevel_),
-        gubCutLevel_(p.gubCutLevel_),
-        mirCutLevel_(p.mirCutLevel_),
-        iboundCutLevel_(p.iboundCutLevel_),
-        flowcoverCutLevel_(p.flowcoverCutLevel_),
-        flowpathCutLevel_(p.flowpathCutLevel_),
-        disjunctCutLevel_(p.disjunctCutLevel_),
-        gomoryCutLevel_(p.gomoryCutLevel_)
-      {
-
-      }
+      LP_PRESOLVE presolve_;
+      MIP_CUT cutLevel_;       // Determines whether or not to generate cuts for the problem and how aggressively (will be overruled by the specific ones below). 
+      MIP_CUT cliqueCutLevel_; // Determines whether or not to generate clique cuts for the problem and how aggressively. 
+      MIP_CUT coverCutLevel_;  // Determines whether or not to generate cover cuts for the problem and how aggressively. 
+      MIP_CUT gubCutLevel_;    // Determines whether or not to generate generalized upper bound (GUB) cuts for the problem and how aggressively. 
+      MIP_CUT mirCutLevel_;    // Determines whether or not mixed integer rounding (MIR) cuts should be generated for the problem and how aggressively.  
+      MIP_CUT iboundCutLevel_; // Determines whether or not to generate implied bound cuts for the problem and how aggressively.
+      MIP_CUT flowcoverCutLevel_; //Determines whether or not to generate flow cover cuts for the problem and how aggressively. 
+      MIP_CUT flowpathCutLevel_; //Determines whether or not to generate flow path cuts for the problem and how aggressively.
+      MIP_CUT disjunctCutLevel_; // Determines whether or not to generate disjunctive cuts for the problem and how aggressively.
+      MIP_CUT gomoryCutLevel_; // Determines whether or not to generate gomory fractional cuts for the problem and how aggressively. 
 
       /// constructor
       /// \param cutUp upper cutoff - assume that: min_x f(x) <= cutUp 
diff --git a/include/opengm/inference/lpgurobi2.hxx b/include/opengm/inference/lpgurobi2.hxx
index 192b038..a9c1a9c 100644
--- a/include/opengm/inference/lpgurobi2.hxx
+++ b/include/opengm/inference/lpgurobi2.hxx
@@ -25,16 +25,6 @@ public:
 
    // public member functions
    virtual std::string name() const;
-
-   template<class _GM>
-   struct RebindGm{
-       typedef LPGurobi2<_GM, ACC_TYPE> type;
-   };
-
-   template<class _GM,class _ACC>
-   struct RebindGmAndAcc{
-       typedef LPGurobi2<_GM, _ACC > type;
-   };
 };
 
 template<class GM_TYPE, class ACC_TYPE>
diff --git a/include/opengm/inference/lsatr.hxx b/include/opengm/inference/lsatr.hxx
index 2772356..1ddfa42 100644
--- a/include/opengm/inference/lsatr.hxx
+++ b/include/opengm/inference/lsatr.hxx
@@ -97,19 +97,7 @@ namespace opengm {
       typedef opengm::visitors::VerboseVisitor<LSA_TR<GM,ACC> > VerboseVisitorType;
       typedef opengm::visitors::EmptyVisitor<LSA_TR<GM,ACC> >  EmptyVisitorType;
       typedef opengm::visitors::TimingVisitor<LSA_TR<GM,ACC> > TimingVisitorType; 
-    
-
-        template<class _GM>
-        struct RebindGm{
-            typedef LSA_TR<_GM, ACC> type;
-        };
-
-        template<class _GM,class _ACC>
-        struct RebindGmAndAcc{
-            typedef LSA_TR<_GM, _ACC > type;
-        };
-
-
+   
       class Parameter {
       public:
          enum DISTANCE {HAMMING, EUCLIDEAN};
@@ -130,18 +118,6 @@ namespace opengm {
             reductionRatio_   = 0.25; // used to decide whether to increase or decrease lambda using the multiplier
             distance_         = EUCLIDEAN; 
          }
-
-         template<class P>
-         Parameter(const P & p)
-         :  randSeed_(p.randSeed_),
-            maxLambda_(p.maxLambda_),
-            initialLambda_(p.initialLambda_),
-            precisionLambda_(p.precisionLambda_),
-            lambdaMultiplier_(p.lambdaMultiplier_),
-            reductionRatio_(p.reductionRatio_),
-            distance_(p.distance_){
-                
-            }
       };
 
       LSA_TR(const GraphicalModelType&);
diff --git a/include/opengm/inference/messagepassing/messagepassing.hxx b/include/opengm/inference/messagepassing/messagepassing.hxx
index 56a854b..d54c332 100644
--- a/include/opengm/inference/messagepassing/messagepassing.hxx
+++ b/include/opengm/inference/messagepassing/messagepassing.hxx
@@ -64,21 +64,6 @@ public:
    /// Visitor
    typedef visitors::EmptyVisitor<MessagePassing<GM, ACC, UPDATE_RULES, DIST> > EmptyVisitorType;
 
-
-    template<class _GM>
-    struct RebindGm{
-        typedef typename UPDATE_RULES:: template RebindGm<_GM>::type UR; 
-        typedef MessagePassing<_GM, ACC, UR, DIST> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef typename UPDATE_RULES:: template RebindGmAndAcc<_GM,_ACC>::type UR; 
-        typedef MessagePassing<_GM, _ACC, UR, DIST> type;
-    };
-
-
-
    struct Parameter {
       typedef typename  UPDATE_RULES::SpecialParameterType SpecialParameterType;
       Parameter
@@ -97,21 +82,6 @@ public:
          specialParameter_(specialParameter),
          isAcyclic_(isAcyclic)
       {}
-      
-      template<class P>
-      Parameter
-      (
-         const P & p
-      )
-      :  maximumNumberOfSteps_(p.maximumNumberOfSteps_),
-         bound_(p.bound_),
-         damping_(p.damping_),
-         inferSequential_(p.inferSequential_),
-         useNormalization_(p.useNormalization_),
-         specialParameter_(p.specialParameter_),
-         isAcyclic_(p.isAcyclic_)
-      {}
-
 
       size_t maximumNumberOfSteps_;
       ValueType bound_;
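
The remaining Parameter fields are set directly on the struct; the removed templated constructor only copied them from a compatible parameter object. A hedged sketch for belief propagation (BpType stands for a configured MessagePassing/BP typedef and gm for its model; the constructor signature is an assumption):

    BpType::Parameter param;
    param.maximumNumberOfSteps_ = 50;    // message-passing sweeps
    param.damping_              = 0.5;   // damp updates for stability on loopy models
    param.useNormalization_     = true;  // renormalize messages each sweep
    BpType bp(gm, param);
    bp.infer();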
diff --git a/include/opengm/inference/messagepassing/messagepassing_bp.hxx b/include/opengm/inference/messagepassing/messagepassing_bp.hxx
index c76d74f..4146588 100644
--- a/include/opengm/inference/messagepassing/messagepassing_bp.hxx
+++ b/include/opengm/inference/messagepassing/messagepassing_bp.hxx
@@ -87,17 +87,6 @@ namespace opengm {
       typedef VariableHullBP<GM, BufferType, OperatorType, ACC> VariableHullType;
       typedef meta::EmptyType SpecialParameterType;
 
-    template<class _GM>
-    struct RebindGm{
-        typedef BeliefPropagationUpdateRules<_GM, ACC, BUFFER> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef BeliefPropagationUpdateRules<_GM, _ACC, BUFFER> type;
-    };
-
-
       template<class MP_PARAM>
          static void initializeSpecialParameter(const GM& gm, MP_PARAM& mpParameter)
             {}
diff --git a/include/opengm/inference/messagepassing/messagepassing_trbp.hxx b/include/opengm/inference/messagepassing/messagepassing_trbp.hxx
index 9d16fbb..92b8de7 100644
--- a/include/opengm/inference/messagepassing/messagepassing_trbp.hxx
+++ b/include/opengm/inference/messagepassing/messagepassing_trbp.hxx
@@ -82,15 +82,6 @@ namespace opengm {
       typedef FactorHullTRBP<GM, BUFFER, OperatorType, ACC> FactorHullType;
       typedef VariableHullTRBP<GM, BUFFER, OperatorType, ACC> VariableHullType;
       typedef std::vector<ValueType> SpecialParameterType;
-        template<class _GM>
-        struct RebindGm{
-            typedef TrbpUpdateRules<_GM, ACC, BUFFER> type;
-        };
-
-        template<class _GM,class _ACC>
-        struct RebindGmAndAcc{
-            typedef TrbpUpdateRules<_GM, _ACC, BUFFER> type;
-        };
 
       template<class MP_PARAM>
       static void initializeSpecialParameter(const GM& gm,MP_PARAM& mpParameter) {
diff --git a/include/opengm/inference/movemaker.hxx b/include/opengm/inference/movemaker.hxx
index 468115c..caf3478 100644
--- a/include/opengm/inference/movemaker.hxx
+++ b/include/opengm/inference/movemaker.hxx
@@ -32,14 +32,6 @@ public:
    typedef opengm::GraphicalModel<ValueType, OperatorType, FunctionTypeList, SubGmSpace> SubGmType;
    /// \endcond
 
-
-    template<class _GM>
-    struct RebindGm{
-        typedef Movemaker<_GM> type;
-    };
-
-
-
    Movemaker(const GraphicalModelType&); 
    template<class StateIterator>
       Movemaker(const GraphicalModelType&, StateIterator); 
diff --git a/include/opengm/inference/mqpbo.hxx b/include/opengm/inference/mqpbo.hxx
index 44ee5e0..c1fd399 100644
--- a/include/opengm/inference/mqpbo.hxx
+++ b/include/opengm/inference/mqpbo.hxx
@@ -46,36 +46,11 @@ namespace opengm {
       typedef visitors::TimingVisitor<MQPBO<GM, ACC> >  TimingVisitorType; 
       typedef ValueType                       GraphValueType;
       
-        template<class _GM>
-        struct RebindGm{
-            typedef MQPBO<_GM, ACC> type;
-        };
-
-        template<class _GM,class _ACC>
-        struct RebindGmAndAcc{
-            typedef MQPBO<_GM, _ACC > type;
-        };
-
-
-
       enum PermutationType {NONE, RANDOM, MINMARG};
       
       class Parameter{
       public:
          Parameter(): useKovtunsMethod_(true), probing_(false),  strongPersistency_(false), rounds_(0), permutationType_(NONE) {};
-         
-
-        template<class P>
-        Parameter(const P & p)
-        :   label_(p.label_),
-            useKovtunsMethod_(p.useKovtunsMethod_),
-            probing_(p.probing_),
-            strongPersistency_(p.strongPersistency_),
-            rounds_(p.rounds_),
-            permutationType_(p.permutationType_){
-
-        }
-
          std::vector<LabelType> label_;
          bool useKovtunsMethod_;
          const bool probing_; //do not use this!
@@ -189,12 +164,12 @@ namespace opengm {
       }
 
       if(param_.rounds_>0){
-         //std::cout << "Large" <<std::endl;
+         std::cout << "Large" <<std::endl;
          qpbo_ = new kolmogorov::qpbo::QPBO<GraphValueType > (numNodes_, numEdges_); // max number of nodes & edges
          qpbo_->AddNode(numNodes_);
       }
       else{
-         //std::cout << "Small" <<std::endl;      
+         std::cout << "Small" <<std::endl;      
          qpbo_ = new kolmogorov::qpbo::QPBO<GraphValueType > (gm_.numberOfVariables(), numSOF); // max number of nodes & edges
          qpbo_->AddNode(gm_.numberOfVariables());
       }
@@ -845,7 +820,7 @@ namespace opengm {
 
       if(param_.useKovtunsMethod_){
          if(isPotts){
-            //std::cout << "Use Kovtuns method for potts"<<std::endl;
+            std::cout << "Use Kovtuns method for potts"<<std::endl;
             for(LabelType l=0; l<maxNumberOfLabels; ++l) {
                testQuess(l);
                double xoptimality = optimality(); 
diff --git a/include/opengm/inference/multicut.hxx b/include/opengm/inference/multicut.hxx
index 196e15c..7933d17 100644
--- a/include/opengm/inference/multicut.hxx
+++ b/include/opengm/inference/multicut.hxx
@@ -84,15 +84,7 @@ public:
    typedef visitors::VerboseVisitor<Multicut<GM,ACC> > VerboseVisitorType;
    typedef visitors::EmptyVisitor<Multicut<GM,ACC> > EmptyVisitorType;
    typedef visitors::TimingVisitor<Multicut<GM,ACC> > TimingVisitorType;
-    template<class _GM>
-    struct RebindGm{
-        typedef Multicut<_GM, ACC> type;
-    };
 
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef Multicut<_GM, _ACC > type;
-    };
 
 #ifdef WITH_BOOST
    typedef  boost::unordered_map<IndexType, LPIndexType> EdgeMapType;
@@ -128,7 +120,6 @@ public:
       bool useBufferedStates_;
       bool initializeWith3Cycles_;
 
-    
       /// \param numThreads number of threads that should be used (default = 0 [automatic])
      /// \param cutUp an upper bound on the optimal value (helps to prune the search tree)
     Parameter
@@ -136,18 +127,9 @@ public:
         int numThreads=0,
         double cutUp=1.0e+75
     )
-    :   numThreads_(numThreads),
-        verbose_(false),
-        verboseCPLEX_(false),
-        cutUp_(cutUp),
-        timeOut_(36000000),
-        maximalNumberOfConstraintsPerRound_(1000000),
-        edgeRoundingValue_(0.00000001),
-        MWCRounding_(NEAREST),
-        reductionMode_(3),
-        useOldPriorityQueue_(false),
-        useChordalSearch_(false),
-        useBufferedStates_(false),
+    :   numThreads_(numThreads), verbose_(false),verboseCPLEX_(false), cutUp_(cutUp),
+        timeOut_(36000000), maximalNumberOfConstraintsPerRound_(1000000),
+        edgeRoundingValue_(0.00000001),MWCRounding_(NEAREST), reductionMode_(3),useOldPriorityQueue_(false), useChordalSearch_(false), useBufferedStates_(false),
         initializeWith3Cycles_(false)
     {};
 
@@ -156,17 +138,10 @@ public:
     (
         const OTHER_PARAM & p
     )
-    :   numThreads_(p.numThreads_),
-        verbose_(p.verbose_),
-        verboseCPLEX_(p.verboseCPLEX_),
-        cutUp_(p.cutUp_),
-        timeOut_(p.timeOut_),
-        maximalNumberOfConstraintsPerRound_(p.maximalNumberOfConstraintsPerRound_),
-        edgeRoundingValue_(p.edgeRoundingValue_),
-        MWCRounding_(p.MWCRounding_),
-        reductionMode_(p.reductionMode_),
-        useOldPriorityQueue_(p.useOldPriorityQueue_),
-        useChordalSearch_(p.useChordalSearch_),
+    :   numThreads_(p.numThreads_), verbose_(p.verbose_),verboseCPLEX_(p.verboseCPLEX_), cutUp_(p.cutUp_),
+        timeOut_(p.timeOut_), maximalNumberOfConstraintsPerRound_(p.maximalNumberOfConstraintsPerRound_),
+        edgeRoundingValue_(p.edgeRoundingValue_),MWCRounding_(p.MWCRounding_), reductionMode_(p.reductionMode_),
+        useOldPriorityQueue_(p.useOldPriorityQueue_), useChordalSearch_(p.useChordalSearch_),
         initializeWith3Cycles_(false)
     {};
    };
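
Both Multicut Parameter constructors above initialize the same field set; only the formatting of the initializer lists is reverted. A short usage sketch built from the defaults visible in this hunk (GmType and the solver constructor signature are assumptions):

    typedef opengm::Multicut<GmType, opengm::Minimizer> McType;
    McType::Parameter param(4 /*numThreads*/, 1.0e+75 /*cutUp*/);
    param.timeOut_ = 3600.0;      // override the 36000000 default shown above
    McType mc(gm, param);
    mc.infer();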
diff --git a/include/opengm/inference/partition-move.hxx b/include/opengm/inference/partition-move.hxx
index b508356..0dc49a7 100644
--- a/include/opengm/inference/partition-move.hxx
+++ b/include/opengm/inference/partition-move.hxx
@@ -53,23 +53,10 @@ public:
    typedef __gnu_cxx::hash_map<IndexType, LPIndexType> EdgeMapType;
    typedef __gnu_cxx::hash_set<IndexType>              VariableSetType; 
 #endif
-
-
-    template<class _GM>
-    struct RebindGm{
-        typedef PartitionMove<_GM, ACC> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef PartitionMove<_GM, _ACC > type;
-    };
-
+ 
 
    struct Parameter{
      Parameter ( ) {};
-     template<class P>
-     Parameter (const P & p) {};
    };
 
    ~PartitionMove();
diff --git a/include/opengm/inference/qpbo.hxx b/include/opengm/inference/qpbo.hxx
index b31571c..b24d2bf 100644
--- a/include/opengm/inference/qpbo.hxx
+++ b/include/opengm/inference/qpbo.hxx
@@ -26,22 +26,7 @@ public:
    typedef visitors::TimingVisitor<QPBO<GM,MIN_ST_CUT> > TimingVisitorType;
    typedef visitors::EmptyVisitor<QPBO<GM,MIN_ST_CUT> > EmptyVisitorType;
 
-    template<class _GM>
-    struct RebindGm{
-        typedef QPBO<_GM, MIN_ST_CUT> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef QPBO<_GM ,MIN_ST_CUT> type;
-    };
-
-
-   struct Parameter{
-     Parameter ( ) {};
-     template<class P>
-     Parameter (const P & p) {};
-   };
+   struct Parameter {};
 
    QPBO(const GraphicalModelType&, Parameter = Parameter());
    std::string name() const;
diff --git a/include/opengm/inference/reducedinference.hxx b/include/opengm/inference/reducedinference.hxx
index 4ed215b..16baad6 100644
--- a/include/opengm/inference/reducedinference.hxx
+++ b/include/opengm/inference/reducedinference.hxx
@@ -84,21 +84,6 @@ namespace opengm {
     typedef visitors::TimingVisitor<ReducedInference<GM, ACC, INF> >  TimingVisitorType;
 
 
-    template<class _GM>
-    struct RebindGm{
-        typedef typename ReducedInferenceHelper<_GM>::InfGmType RebindedInfGmType;
-        typedef typename INF:: template RebindGm<RebindedInfGmType>::type RebindedInf;
-        typedef ReducedInference<_GM, ACC, RebindedInf> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef typename ReducedInferenceHelper<_GM>::InfGmType RebindedInfGmType;
-        typedef typename INF:: template RebindGmAndAcc<RebindedInfGmType,_ACC>::type RebindedInf;
-        typedef ReducedInference<_GM,_ACC, RebindedInf> type;
-    };
-
-
     class Parameter
     {
     public:
@@ -106,17 +91,6 @@ namespace opengm {
         bool Persistency_;
         bool Tentacle_;
         bool ConnectedComponents_;
-
-
-        template<class P>
-        Parameter(const P & p)
-        :   subParameter_(p.subParameter_),
-            Persistency_(p.Persistency_),
-            Tentacle_(p.Tentacle_),
-            ConnectedComponents_(p.ConnectedComponents_)
-        {
-        }
-
         Parameter(
             const bool Persistency=false,
             const bool Tentacle=false,
diff --git a/include/opengm/inference/sat.hxx b/include/opengm/inference/sat.hxx
index 8515aaa..2fdbfc4 100644
--- a/include/opengm/inference/sat.hxx
+++ b/include/opengm/inference/sat.hxx
@@ -27,23 +27,7 @@ namespace opengm {
       typedef GM GraphicalModelType;
       OPENGM_GM_TYPE_TYPEDEFS;
 
-
-        template<class _GM>
-        struct RebindGm{
-            typedef SAT<_GM> type;
-        };
-
-        template<class _GM,class _ACC>
-        struct RebindGmAndAcc{
-            typedef SAT<_GM> type;
-        };
-
-
-       struct Parameter{
-         Parameter ( ) {};
-         template<class P>
-         Parameter (const P & p) {};
-       };
+      struct Parameter {};
 
       SAT(const GraphicalModelType&, const Parameter& = Parameter());
       std::string name() const;
diff --git a/include/opengm/inference/self_fusion.hxx b/include/opengm/inference/self_fusion.hxx
index e2f282d..7f260a3 100644
--- a/include/opengm/inference/self_fusion.hxx
+++ b/include/opengm/inference/self_fusion.hxx
@@ -72,7 +72,7 @@ struct FusionVisitor{
         iteration_(0),
         fuseNth_(fuseNth),
         value_(value),
-        bound_(bound),
+                bound_(bound),
         argFromInf_(selfFusion.graphicalModel().numberOfVariables()),
         argBest_(argBest),
         argOut_(selfFusion.graphicalModel().numberOfVariables()),
@@ -280,22 +280,9 @@ public:
     typedef INFERENCE ToFuseInferenceType;
 
     enum FusionSolver{
-        QpboFusion=0,
-        CplexFusion=1,
-        LazyFlipperFusion=2
-    };
-
-
-    template<class _GM>
-    struct RebindGm{
-        typedef typename INFERENCE:: template RebindGm<_GM>::type RebindedInf;
-        typedef SelfFusion<RebindedInf> type;
-    };
-
-    template<class _GM,class _ACC>
-    struct RebindGmAndAcc{
-        typedef typename INFERENCE:: template RebindGmAndAcc<_GM, _ACC>::type RebindedInf;
-        typedef SelfFusion<RebindedInf> type;
+        QpboFusion,
+        CplexFusion,
+        LazyFlipperFusion
     };
 
 
@@ -324,32 +311,6 @@ public:
       {
 
       }
-
-      template<class P>
-      Parameter(
-        const P & p
-      )
-      : fuseNth_(p.fuseNth_),
-        fusionSolver_(),
-        infParam_(p.infParam_),
-        maxSubgraphSize_(p.maxSubgraphSize_),
-        reducedInf_(p.reducedInf_),
-        connectedComponents_(p.connectedComponents_),
-        tentacles_(p.tentacles_),
-        fusionTimeLimit_(p.fusionTimeLimit_),
-        numStopIt_(p.numStopIt_)
-      { 
-        if(p.fusionSolver_ == 0){
-            fusionSolver_ = QpboFusion;
-        }
-        else if(p.fusionSolver_ == 1){
-            fusionSolver_ = CplexFusion;
-        }
-        else if(p.fusionSolver_ == 2){
-            fusionSolver_ = LazyFlipperFusion;
-        }
-      }
-
       UInt64Type fuseNth_;
       FusionSolver fusionSolver_;
       typename INFERENCE::Parameter infParam_;
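
After the revert the FusionSolver enumerators carry their implicit values 0, 1, 2 again, and parameters are filled in by hand rather than converted from a foreign parameter object. A hedged sketch (InfType is an assumed proposal-generating inference type):

    typedef opengm::SelfFusion<InfType> SelfFusionType;
    SelfFusionType::Parameter param;
    param.fuseNth_      = 2;                                  // fuse every second proposal
    param.fusionSolver_ = SelfFusionType::LazyFlipperFusion;  // or QpboFusion / CplexFusion
    // param.infParam_ carries the Parameter of the wrapped proposal generator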
diff --git a/include/opengm/learning/bundle-optimizer.hxx b/include/opengm/learning/bundle-optimizer.hxx
deleted file mode 100644
index e04dd48..0000000
--- a/include/opengm/learning/bundle-optimizer.hxx
+++ /dev/null
@@ -1,326 +0,0 @@
-#pragma once
-#ifndef OPENGM_LEARNING_BUNDLE_OPTIMIZER_HXX
-#define OPENGM_LEARNING_BUNDLE_OPTIMIZER_HXX
-
-#include "solver/BundleCollector.h"
-#include "solver/QuadraticSolverFactory.h"
-
-namespace opengm {
-
-namespace learning {
-
-//template <typename T>
-//std::ostream& operator<<(std::ostream& out, Weights<T>& w) {
-
-//    out << "[";
-//    for (int i = 0; i < w.numberOfWeights(); i++) {
-
-//        if (i > 0)
-//            out << ", ";
-//        out << w[i];
-//    }
-//    out << "]";
-//}
-
-enum OptimizerResult {
-
-	// the minimal optimization gap was reached
-	ReachedMinGap,
-
-	// the requested number of steps was exceeded
-	ReachedSteps,
-
-	// something went wrong
-	Error
-};
-
-template <typename ValueType>
-class BundleOptimizer {
-
-public:
-
-	enum EpsStrategy {
-
-		/**
-		 * Compute the eps from the gap estimate between the lower bound and the 
-		 * target objective. The gap estimate will only be correct for oracle 
-		 * calls that perform exact inference.
-		 */
-		EpsFromGap,
-
-		/**
-		 * Compute the eps from the change of the minimum of the lower bound.  
-		 * This version does also work for approximate (but deterministic) 
-		 * inference methods.
-		 */
-		EpsFromChange
-	};
-
-	struct Parameter {
-
-		Parameter() :
-			lambda(1.0),
-			min_eps(1e-5),
-			steps(0),
-			epsStrategy(EpsFromChange) {}
-
-		// regularizer weight
-		double lambda;
-
-		// the maximal number of steps to perform, 0 = no limit
-		unsigned int steps;
-
-		// bundle method stops if eps is smaller than this value
-		ValueType min_eps;
-
-		// how to compute the eps for the stopping criterion
-		EpsStrategy epsStrategy;
-        bool verbose_;
-	};
-
-	BundleOptimizer(const Parameter& parameter = Parameter());
-
-	~BundleOptimizer();
-
-	/**
-	 * Start the bundle method optimization on the given oracle. The oracle has 
-	 * to model:
-	 *
-     *   Weights current;
-     *   Weights gradient;
-	 *   double          value;
-	 *
-	 *   valueAndGradient = oracle(current, value, gradient);
-	 *
-	 * and should return the value and gradient of the objective function 
-	 * (passed by reference) at point 'current'.
-	 */
-    template <typename Oracle, typename Weights>
-    OptimizerResult optimize(Oracle& oracle, Weights& w);
-
-private:
-
-    template <typename Weights>
-    void setupQp(const Weights& w);
-
-	template <typename ModelWeights>
-	void findMinLowerBound(ModelWeights& w, ValueType& value);
-
-	template <typename ModelWeights>
-	ValueType dot(const ModelWeights& a, const ModelWeights& b);
-
-	Parameter _parameter;
-
-	solver::BundleCollector _bundleCollector;
-
-	solver::QuadraticSolverBackend* _solver;
-};
-
-template <typename T>
-BundleOptimizer<T>::BundleOptimizer(const Parameter& parameter) :
-	_parameter(parameter),
-	_solver(0) {}
-
-template <typename T>
-BundleOptimizer<T>::~BundleOptimizer() {
-
-	if (_solver)
-		delete _solver;
-}
-
-template <typename T>
-template <typename Oracle, typename Weights>
-OptimizerResult
-BundleOptimizer<T>::optimize(Oracle& oracle, Weights& w) {
-
-	setupQp(w);
-
-	/*
-	  1. w_0 = 0, t = 0
-	  2. t++
-	  3. compute a_t = ∂L(w_t-1)/∂w
-	  4. compute b_t =  L(w_t-1) - <w_t-1,a_t>
-	  5. ℒ_t(w) = max_i <w,a_i> + b_i
-	  6. w_t = argmin λ½|w|² + ℒ_t(w)
-	  7. ε_t = min_i [ λ½|w_i|² + L(w_i) ] - [ λ½|w_t|² + ℒ_t(w_t) ]
-			   ^^^^^^^^^^^^^^^^^^^^^^^^^^^   ^^^^^^^^^^^^^^^^^^^^^^^
-				 smallest L(w) ever seen    current min of lower bound
-	  8. if ε_t > ε, goto 2
-	  9. return w_t
-	*/
-
-	T minValue     =  std::numeric_limits<T>::infinity();
-	T lastMinLower = -std::numeric_limits<T>::infinity();
-
-	unsigned int t = 0;
-
-    while (true) {
-
-		t++;
-
-        if(_parameter.verbose_)
-            std::cout << std::endl << "----------------- iteration      " << t << std::endl;
-
-        Weights w_tm1 = w;
-
-        if(_parameter.verbose_){
-            std::cout << "w: ";
-            for(size_t i=0; i<w_tm1.size(); ++i)
-                std::cout << w_tm1[i] << " ";
-            std::cout << std::endl;
-        }
-
-		// value of L at current w
-		T L_w_tm1 = 0.0;
-
-		// gradient of L at current w
-        Weights a_t(w.numberOfWeights());
-
-		// get current value and gradient
-		oracle(w_tm1, L_w_tm1, a_t);
-
-        if(_parameter.verbose_){
-            std::cout << "       L(w)              is: " << L_w_tm1 << std::endl;
-            std::cout << "∂L(w)/∂:  (";
-            for(size_t i=0; i<a_t.size(); ++i)
-                std::cout << a_t[i] << " ";
-            std::cout << ")" << std::endl;
-        }
-
-		// update smallest observed value of regularized L
-		minValue = std::min(minValue, L_w_tm1 + _parameter.lambda*0.5*dot(w_tm1, w_tm1));
-
-        if(_parameter.verbose_)
-            std::cout << " min_i L(w_i) + ½λ|w_i|² is: " << minValue << std::endl;
-
-		// compute hyperplane offset
-		T b_t = L_w_tm1 - dot(w_tm1, a_t);
-
-        if(_parameter.verbose_){
-            std::cout << "adding hyperplane: ( ";
-            for(size_t i=0; i<a_t.size(); ++i)
-                std::cout << a_t[i] << " ";
-            std::cout << ")*w + " << b_t << std::endl;
-        }
-
-		// update lower bound
-		_bundleCollector.addHyperplane(a_t, b_t);
-
-		// minimal value of lower bound
-		T minLower;
-
-        // update w and get minimal value
-		findMinLowerBound(w, minLower);
-
-        // norm of w
-        double norm = 0.0;
-        for(size_t i=0; i<w.size(); ++i)
-            norm += w[i]*w[i];
-        norm = std::sqrt(norm);
-
-        if(_parameter.verbose_){
-            std::cout << " min_w ℒ(w)   + ½λ|w|²   is: " << minLower << std::endl;
-            std::cout << " w* of ℒ(w)   + ½λ|w|²   is: (";
-            for(size_t i=0; i<w.size(); ++i)
-                std::cout << w[i] << " ";
-            std::cout << ")              normalized: (";
-            for(size_t i=0; i<w.size(); ++i)
-                std::cout << w[i]/norm << " ";
-            std::cout << ")" << std::endl;
-        }
-
-		// compute gap
-		T eps_t;
-		if (_parameter.epsStrategy == EpsFromGap)
-			eps_t = minValue - minLower;
-		else
-			eps_t = minLower - lastMinLower;
-
-		lastMinLower = minLower;
-
-        if(_parameter.verbose_)
-            std::cout  << "          ε   is: " << eps_t << std::endl;
-
-		// converged?
-		if (eps_t <= _parameter.min_eps)
-			break;
-	}
-
-	return ReachedMinGap;
-}
-
-template <typename T>
-template <typename Weights>
-void
-BundleOptimizer<T>::setupQp(const Weights& w) {
-
-	/*
-	  w* = argmin λ½|w|² + ξ, s.t. <w,a_i> + b_i ≤ ξ ∀i
-	*/
-
-	if (!_solver)
-		_solver = solver::QuadraticSolverFactory::Create();
-
-	_solver->initialize(w.numberOfWeights() + 1, solver::Continuous);
-
-	// one variable for each component of w and for ξ
-    solver::QuadraticObjective obj(w.numberOfWeights() + 1);
-
-	// regularizer
-    for (unsigned int i = 0; i < w.numberOfWeights(); i++)
-		obj.setQuadraticCoefficient(i, i, 0.5*_parameter.lambda);
-
-	// ξ
-    obj.setCoefficient(w.numberOfWeights(), 1.0);
-
-	// we minimize
-	obj.setSense(solver::Minimize);
-
-	// we are done with the objective -- this does not change anymore
-	_solver->setObjective(obj);
-}
-
-template <typename T>
-template <typename ModelWeights>
-void
-BundleOptimizer<T>::findMinLowerBound(ModelWeights& w, T& value) {
-
-	_solver->setConstraints(_bundleCollector.getConstraints());
-
-	solver::Solution x;
-	std::string msg;
-	bool optimal = _solver->solve(x, value, msg);
-
-	if (!optimal) {
-
-		std::cerr
-				<< "[BundleOptimizer] QP could not be solved to optimality: "
-				<< msg << std::endl;
-
-		return;
-	}
-
-	for (size_t i = 0; i < w.numberOfWeights(); i++)
-		w[i] = x[i];
-}
-
-template <typename T>
-template <typename ModelWeights>
-T
-BundleOptimizer<T>::dot(const ModelWeights& a, const ModelWeights& b) {
-
-	OPENGM_ASSERT(a.numberOfWeights() == b.numberOfWeights());
-
-	T d = 0.0;
-	for (size_t i = 0; i < a.numberOfWeights(); i++)
-		d += a[i]*b[i];
-
-	return d;
-}
-
-} // learning
-
-} // opengm
-
-#endif // OPENGM_LEARNING_BUNDLE_OPTIMIZER_HXX
-
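
For reference, the deleted optimizer's inline comment describes a textbook bundle method; rendered as display math (same content as steps 3-7 above, in LaTeX form):

    \begin{aligned}
    a_t &= \partial L(w_{t-1}) / \partial w \\
    b_t &= L(w_{t-1}) - \langle w_{t-1}, a_t \rangle \\
    \mathcal{L}_t(w) &= \max_{i \le t} \; \langle w, a_i \rangle + b_i \\
    w_t &= \operatorname*{argmin}_w \; \tfrac{\lambda}{2} \lVert w \rVert^2 + \mathcal{L}_t(w) \\
    \varepsilon_t &= \min_{i \le t} \Big[ \tfrac{\lambda}{2} \lVert w_i \rVert^2 + L(w_i) \Big]
                   - \Big[ \tfrac{\lambda}{2} \lVert w_t \rVert^2 + \mathcal{L}_t(w_t) \Big]
    \end{aligned}

The loop stops once \varepsilon_t \le \varepsilon_{\min}; with the EpsFromChange strategy the code instead uses the change of the lower-bound minimum between iterations, which also works for approximate but deterministic oracles.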
diff --git a/include/opengm/learning/dataset/dataset.hxx b/include/opengm/learning/dataset/dataset.hxx
deleted file mode 100644
index e315100..0000000
--- a/include/opengm/learning/dataset/dataset.hxx
+++ /dev/null
@@ -1,234 +0,0 @@
-#pragma once
-#ifndef OPENGM_DATASET_HXX
-#define OPENGM_DATASET_HXX
-
-#include <vector>
-#include <cstdlib>
-
-#include "../../graphicalmodel/weights.hxx"
-#include "../../functions/unary_loss_function.hxx"
-#include "../loss/noloss.hxx"
-
-namespace opengm {
-   namespace datasets{
-     
-    template<class GM>
-    struct DefaultLossGm{
-
-        // make the graphical model with loss
-        typedef typename GM::SpaceType         SpaceType;
-        typedef typename GM::ValueType         ValueType;
-        typedef typename GM::IndexType         IndexType;
-        typedef typename GM::LabelType         LabelType;
-        typedef typename GM::OperatorType      OperatorType;
-        typedef typename GM::FunctionTypeList  OrgFunctionTypeList;
-
-        // extend the typelist
-        typedef typename opengm::meta::TypeListGenerator<
-            opengm::ExplicitFunction<ValueType,IndexType,LabelType>, 
-            opengm::UnaryLossFunction<ValueType,IndexType,LabelType>
-        >::type LossOnlyFunctionTypeList;
-
-        typedef typename opengm::meta::MergeTypeListsNoDuplicates<
-            OrgFunctionTypeList,LossOnlyFunctionTypeList
-        >::type CombinedList;
-        // loss graphical model type
-
-        typedef GraphicalModel<ValueType, OperatorType, CombinedList, SpaceType> type;
-    };
-
-    template<class GM, class LOSS=opengm::learning::NoLoss, class LOSS_GM = DefaultLossGm<GM> >
-    class Dataset{
-    public:
-        typedef GM                       GMType;
-
-        // generate the gm with loss here atm (THIS IS WRONG)
-        typedef typename opengm::meta::EvalIf<
-        opengm::meta::Compare<LOSS_GM, DefaultLossGm<GM> >::value,
-        DefaultLossGm<GM>,
-        meta::Self<LOSS_GM>
-        >::type GMWITHLOSS;
-
-        //typedef GM                       GMWITHLOSS;
-        typedef LOSS                     LossType;
-        typedef typename LOSS::Parameter LossParameterType;
-        typedef typename GM::ValueType   ValueType;
-        typedef typename GM::IndexType   IndexType;
-        typedef typename GM::LabelType   LabelType;
-
-
-        typedef opengm::learning::Weights<ValueType> Weights;
-        typedef opengm::learning::WeightConstraints<ValueType> WeightConstraintsType;
-
-
-        void                          lockModel(const size_t i)               { ++count_[i]; }
-        void                          unlockModel(const size_t i)             { OPENGM_ASSERT(count_[i]>0); --count_[i]; }
-        const GM&                     getModel(const size_t i) const          { return gms_[i]; } 
-        const GMWITHLOSS&             getModelWithLoss(const size_t i)const   { return gmsWithLoss_[i]; }
-        const LossParameterType&      getLossParameters(const size_t i)const  { return lossParams_[i]; }
-        const std::vector<LabelType>& getGT(const size_t i) const             { return gts_[i]; }
-        Weights&                      getWeights()                            { return weights_; } 
-        size_t                        getNumberOfWeights() const              { return weights_.numberOfWeights(); }
-        size_t                        getNumberOfModels() const               { return gms_.size(); } 
-
-        template<class INF>
-        ValueType                     getTotalLoss(const typename INF::Parameter& para) const;
-
-        template<class INF>
-        ValueType                     getTotalLossParallel(const typename INF::Parameter& para) const;
-
-        template<class INF>
-        ValueType                     getLoss(const typename INF::Parameter& para, const size_t i) const;
-        ValueType                     getLoss(std::vector<LabelType> conf , const size_t i) const;
-
-        Dataset(size_t numInstances);
-
-        Dataset(const Weights & weights = Weights(),const WeightConstraintsType & weightConstraints = WeightConstraintsType(),size_t numInstances=0);
-
-        //void loadAll(std::string path,std::string prefix); 
-
-        friend class DatasetSerialization;
-        // friend void loadAll<Dataset<GM,LOSS> > (const std::string datasetpath, const std::string prefix, Dataset<GM,LOSS>& ds);
-
-        //~Dataset(){
-        //    std::cout<<"KILL DATASET\n";
-        //}
-    protected:	
-        std::vector<size_t> count_;
-        std::vector<bool> isCached_;
-        std::vector<GM> gms_; 
-        std::vector<GMWITHLOSS> gmsWithLoss_; 
-        std::vector<LossParameterType> lossParams_;
-        std::vector<std::vector<LabelType> > gts_;
-        Weights weights_;
-        WeightConstraintsType weightConstraints_;
-
-
-        void buildModelWithLoss(size_t i);
-    };
-      
-
-    template<class GM, class LOSS, class LOSS_GM>
-    Dataset<GM, LOSS, LOSS_GM>::Dataset(size_t numInstances)
-    : count_(std::vector<size_t>(numInstances)),
-        isCached_(std::vector<bool>(numInstances)),
-        gms_(std::vector<GM>(numInstances)),
-        gmsWithLoss_(std::vector<GMWITHLOSS>(numInstances)),
-        lossParams_(std::vector<LossParameterType>(numInstances)),
-        gts_(std::vector<std::vector<LabelType> >(numInstances)),
-        weights_(0),
-        weightConstraints_()
-    {
-    }
-
-    template<class GM, class LOSS, class LOSS_GM>
-    Dataset<GM, LOSS, LOSS_GM>::Dataset(
-        const Weights & weights, 
-        const WeightConstraintsType & weightConstraints,
-        size_t numInstances
-    ):  count_(std::vector<size_t>(numInstances)),
-        isCached_(std::vector<bool>(numInstances)),
-        gms_(std::vector<GM>(numInstances)),
-        gmsWithLoss_(std::vector<GMWITHLOSS>(numInstances)),
-        lossParams_(std::vector<LossParameterType>(numInstances)),
-        gts_(std::vector<std::vector<LabelType> >(numInstances)),
-        weights_(weights),
-        weightConstraints_(weightConstraints)
-    {
-    }
-
-
-    template<class GM, class LOSS, class LOSS_GM>
-    template<class INF>
-    typename GM::ValueType Dataset<GM, LOSS, LOSS_GM>::getTotalLoss(const typename INF::Parameter& para) const {
-        ValueType sum=0;
-        for(size_t i=0; i<this->getNumberOfModels(); ++i) {
-            sum += this->getLoss<INF>(para, i);
-        }
-        return sum;
-    }
-    template<class GM, class LOSS, class LOSS_GM>
-    template<class INF>
-    typename GM::ValueType Dataset<GM, LOSS, LOSS_GM>::getTotalLossParallel(const typename INF::Parameter& para) const {
-        double totalLoss = 0;
-        #pragma omp parallel for reduction(+:totalLoss)  
-        for(size_t i=0; i<this->getNumberOfModels(); ++i) {
-            totalLoss = totalLoss + this->getLoss<INF>(para, i);
-        }
-        return totalLoss;
-    }
-
-    template<class GM, class LOSS, class LOSS_GM>
-    template<class INF>
-    typename GM::ValueType Dataset<GM, LOSS, LOSS_GM>::getLoss(const typename INF::Parameter& para, const size_t i) const {
-        LOSS lossFunction(lossParams_[i]);
-        const GM& gm = this->getModel(i);
-        const std::vector<typename INF::LabelType>& gt =  this->getGT(i);
-
-        std::vector<typename INF::LabelType> conf;
-        INF inf(gm,para);
-        inf.infer();
-        inf.arg(conf);
-
-        return lossFunction.loss(gm, conf.begin(), conf.end(), gt.begin(), gt.end());
-
-    }
-
-    template<class GM, class LOSS, class LOSS_GM>
-    typename GM::ValueType Dataset<GM, LOSS, LOSS_GM>::getLoss(std::vector<typename GM::LabelType> conf, const size_t i) const {
-        LOSS lossFunction(lossParams_[i]);
-        const GM& gm = this->getModel(i);
-        const std::vector<LabelType>& gt =  this->getGT(i);
-        return lossFunction.loss(gm, conf.begin(), conf.end(), gt.begin(), gt.end());
-    }
-
-
-
-
-    template<class GM, class LOSS, class LOSS_GM>
-    void Dataset<GM, LOSS, LOSS_GM>::buildModelWithLoss(size_t i){
-        OPENGM_ASSERT_OP(i, <, lossParams_.size());
-        OPENGM_ASSERT_OP(i, <, gmsWithLoss_.size());
-        OPENGM_ASSERT_OP(i, <, gms_.size());
-        OPENGM_ASSERT_OP(i, <, gts_.size());
-        //std::cout<<"copy gm\n";
-        gmsWithLoss_[i] = gms_[i];    
-        //std::cout<<"copy done\n";
-        LOSS loss(lossParams_[i]);         
-        OPENGM_CHECK_OP(gts_[i].size(),==, gmsWithLoss_[i].numberOfVariables(),"");
-        loss.addLoss(gmsWithLoss_[i], gts_[i].begin());
-    }
-
-    // template<class GM, class LOSS, class LOSS_GM>
-    // void Dataset<GM, LOSS, LOSS_GM>::loadAll(std::string datasetpath,std::string prefix){
-    //     //Load Header 
-    //     std::stringstream hss;
-    //     hss << datasetpath << "/"<<prefix<<"info.h5";
-    //     hid_t file =  marray::hdf5::openFile(hss.str());
-    //     std::vector<size_t> temp(1);
-    //     marray::hdf5::loadVec(file, "numberOfWeights", temp);
-    //     size_t numWeights = temp[0];
-    //     marray::hdf5::loadVec(file, "numberOfModels", temp);
-    //     size_t numModel = temp[0];
-    //     marray::hdf5::closeFile(file);
-
-    //     gms_.resize(numModel); 
-    //     gmsWithLoss_.resize(numModel);
-    //     gt_.resize(numModel);
-    //     weights_ = Weights(numWeights);
-    //     //Load Models and ground truth
-    //     for(size_t m=0; m<numModel; ++m){
-    //         std::stringstream ss;
-    //         ss  << datasetpath <<"/"<<prefix<<"gm_" << m <<".h5"; 
-    //         hid_t file =  marray::hdf5::openFile(ss.str()); 
-    //         marray::hdf5::loadVec(file, "gt", gt_[m]);
-    //         marray::hdf5::closeFile(file);
-    //         opengm::hdf5::load(gms_[m],ss.str(),"gm"); 
-    //         buildModelWithLoss(m);
-    //     }
-    // };
-
-}
-} // namespace opengm
-
-#endif 
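
The getTotalLossParallel method above leans on an OpenMP reduction; a standalone illustration of that pattern (plain C++, not opengm code; compile with -fopenmp):

    #include <vector>

    // sum per-model losses in parallel; each thread keeps a private
    // accumulator that OpenMP combines with '+' at the end of the loop
    double totalLoss(const std::vector<double>& perModelLoss) {
        double total = 0.0;
        #pragma omp parallel for reduction(+:total)
        for (long i = 0; i < static_cast<long>(perModelLoss.size()); ++i)
            total += perModelLoss[i];
        return total;
    }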
diff --git a/include/opengm/learning/dataset/dataset_io.hxx b/include/opengm/learning/dataset/dataset_io.hxx
deleted file mode 100644
index e526c09..0000000
--- a/include/opengm/learning/dataset/dataset_io.hxx
+++ /dev/null
@@ -1,134 +0,0 @@
-#pragma once
-#ifndef OPENGM_DATASET_IO_HXX
-#define OPENGM_DATASET_IO_HXX
-
-#include <vector>
-#include <cstdlib>
-#include <opengm/graphicalmodel/graphicalmodel_hdf5.hxx>
-#include <opengm/opengm.hxx>
-#include "opengm/learning/loss/generalized-hammingloss.hxx"
-#include "opengm/learning/loss/hammingloss.hxx"
-#include "opengm/learning/loss/noloss.hxx"
-//#include <H5Cpp.h>
-
-namespace opengm{
-   namespace datasets{
-
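-      // WeightSetter is applied to every function of a factor (via callFunctor
-      // in loadAll below) to re-attach freshly deserialized functions to the
-      // dataset's shared weight object; presumably learnable functions hold a
-      // reference to the Weights rather than owning a copy, so this link has
-      // to be restored after loading.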
-      template <class W>
-      struct WeightSetter {
-         public:
-           WeightSetter(W& w) : weights_(w) {}
-
-           template<class F>
-           void operator()(F& f) const { f.setWeights(weights_); }
-
-         private:
-           W& weights_;
-      };
-
-      class DatasetSerialization{
-      public:
-         template<class DATASET>
-         static void save(const DATASET& dataset, const std::string datasetpath, const std::string prefix=""); 
-         template<class DATASET>
-         static void loadAll(const std::string datasetpath, const std::string prefix,  DATASET& dataset);  
-      };
-
-      template<class DATASET>
-      void DatasetSerialization::save(const DATASET& dataset, const std::string datasetpath, const std::string prefix) {
-         typedef typename DATASET::GMType   GMType;
-         typedef typename DATASET::LossParameterType LossParameterType;
-         typedef typename GMType::LabelType LabelType; 
-         typedef typename GMType::ValueType ValueType;
-
-         std::vector<size_t> numWeights(1,dataset.getNumberOfWeights());
-         std::vector<size_t> numModels(1,dataset.getNumberOfModels());
-  
-         std::stringstream hss;
-         hss << datasetpath << "/"<<prefix<<"info.h5";
-         hid_t file = marray::hdf5::createFile(hss.str(), marray::hdf5::DEFAULT_HDF5_VERSION);
-         marray::hdf5::save(file,"numberOfWeights",numWeights);
-         marray::hdf5::save(file,"numberOfModels",numModels);
-         marray::hdf5::closeFile(file); 
-
-         for(size_t m=0; m<dataset.getNumberOfModels(); ++m){
-            const GMType&                 gm = dataset.getModel(m); 
-            const std::vector<LabelType>& gt = dataset.getGT(m);
-            const LossParameterType&      lossParam = dataset.getLossParameters(m);
-            std::stringstream ss;
-            ss  << datasetpath <<"/"<<prefix<<"gm_" << m <<".h5"; 
-            opengm::hdf5::save(gm, ss.str(), "gm");
-            hid_t file = marray::hdf5::openFile(ss.str(), marray::hdf5::READ_WRITE);
-
-            //marray::Vector<LabelType> mgt(gt.size());
-            //std::copy(gt.begin(), gt.end(), mgt.begin());
-
-            marray::hdf5::save(file,"gt",gt);
-            hid_t lossGrp = marray::hdf5::createGroup(file,"loss");
-
-            lossParam.save(lossGrp);
-            marray::hdf5::closeGroup(lossGrp);
-            marray::hdf5::closeFile(file);
-         }
-
-      }
-
-      template<class DATASET>
-      void DatasetSerialization::loadAll(const std::string datasetpath, const std::string prefix, DATASET& dataset) {  
-         typedef typename DATASET::GMType   GMType;
-         typedef typename GMType::LabelType LabelType; 
-         typedef typename GMType::ValueType ValueType;
-         typedef typename DATASET::LossParameterType LossParameterType;
-         
-         //Load Header 
-         std::stringstream hss;
-         hss << datasetpath << "/"<<prefix<<"info.h5";
-         hid_t file =  marray::hdf5::openFile(hss.str());
-         std::vector<size_t> temp(1);
-         marray::hdf5::loadVec(file, "numberOfWeights", temp);
-         size_t numWeights = temp[0];
-         marray::hdf5::loadVec(file, "numberOfModels", temp);
-         size_t numModel = temp[0];
-         marray::hdf5::closeFile(file);
-         
-         dataset.gms_.resize(numModel); 
-         dataset.gmsWithLoss_.resize(numModel);
-         dataset.gts_.resize(numModel);
-         dataset.lossParams_.resize(numModel);
-         dataset.count_.resize(numModel);
-         dataset.isCached_.resize(numModel);
-         dataset.weights_ = opengm::learning::Weights<ValueType>(numWeights);
-         OPENGM_ASSERT_OP(dataset.lossParams_.size(), ==, numModel);
-         WeightSetter<opengm::learning::Weights<ValueType> > wSetter(dataset.weights_);
-
-         //Load Models and ground truth
-         for(size_t m=0; m<numModel; ++m){
-            std::stringstream ss;
-            ss  << datasetpath <<"/"<<prefix<<"gm_" << m <<".h5"; 
-            hid_t file =  marray::hdf5::openFile(ss.str()); 
-            marray::hdf5::loadVec(file, "gt", dataset.gts_[m]);
-            opengm::hdf5::load(dataset.gms_[m],ss.str(),"gm");
-
-            for(size_t fi = 0; fi < dataset.gms_[m].numberOfFactors(); ++fi) {
-                dataset.gms_[m][fi].callFunctor(wSetter);
-            }
-
-            LossParameterType lossParam;
-            hid_t lossGrp = marray::hdf5::openGroup(file, "loss");
-            lossParam.load(lossGrp);
-            std::vector<std::size_t> lossId;
-            marray::hdf5::loadVec(lossGrp, "lossId", lossId);
-            OPENGM_CHECK_OP(lossId.size(), ==, 1, "");
-            OPENGM_CHECK_OP(lossParam.getLossId(), ==, lossId[0],"the dataset needs to be initialized with the same loss type as saved");
-            dataset.lossParams_[m] = lossParam;
-
-            OPENGM_CHECK_OP(dataset.gts_[m].size(), == ,dataset.gms_[m].numberOfVariables(), "");
-            marray::hdf5::closeFile(file);            
-            dataset.buildModelWithLoss(m);
-         }
-      }
-
-   }
-}
-
-#endif
diff --git a/include/opengm/learning/dataset/editabledataset.hxx b/include/opengm/learning/dataset/editabledataset.hxx
deleted file mode 100644
index 1c940b9..0000000
--- a/include/opengm/learning/dataset/editabledataset.hxx
+++ /dev/null
@@ -1,146 +0,0 @@
-#pragma once
-#ifndef OPENGM_EDITABLEDATASET_HXX
-#define OPENGM_EDITABLEDATASET_HXX
-
-#include <vector>
-#include <cstdlib>
-
-#include <opengm/learning/dataset/dataset.hxx>
-#include "../../graphicalmodel/weights.hxx"
-#include "../loss/noloss.hxx"
-
-namespace opengm {
-namespace datasets{
-
-    // template< typename Weights >
-    // struct LinkWeights{
-
-    //     Weights& w_;
-    //     LinkWeights(const Weights& w):w_(w){}
-
-    //     template<class FUNCTION>
-    //     void operator()(const FUNCTION & function)
-    //     {
-    //         function.setWeights(w_);
-    //     }
-    // };
-
-    template<class GM, class LOSS, class LOSS_GM = DefaultLossGm<GM> >
-    class EditableDataset : public Dataset<GM, LOSS, LOSS_GM>{
-    public:
-        typedef GM                     GMType;
-        typedef typename Dataset<GM, LOSS, LOSS_GM>::GMWITHLOSS   GMWITHLOSS;
-        typedef LOSS                   LossType;
-        typedef typename LOSS::Parameter LossParameterType;
-        typedef typename GM::ValueType ValueType;
-        typedef typename GM::IndexType IndexType;
-        typedef typename GM::LabelType LabelType;
-
-        typedef opengm::learning::Weights<ValueType> Weights;
-        typedef opengm::learning::WeightConstraints<ValueType> WeightConstraintsType;
-
-        typedef std::vector<LabelType> GTVector;
-
-        EditableDataset(size_t numInstances) : Dataset<GM, LOSS,LOSS_GM>(numInstances) {}
-        EditableDataset(std::vector<GM>& gms, std::vector<GTVector >& gts, std::vector<LossParameterType>& lossParams);
-
-        EditableDataset(const Weights & weights = Weights(),const WeightConstraintsType & weightConstraints = WeightConstraintsType(),size_t numInstances=0)
-        :   Dataset<GM, LOSS, LOSS_GM>(weights, weightConstraints, numInstances){
-
-        }
-
-
-        void setInstance(const size_t i, const GM& gm, const GTVector& gt, const LossParameterType& p=LossParameterType());
-        void setGT(const size_t i, const GTVector& gt);
-        void pushBackInstance(const GM& gm, const GTVector& gt, const LossParameterType& p=LossParameterType());
-        void setWeights(Weights& w);
-
-
-        void setWeightConstraints(const WeightConstraintsType & weightConstraints);
-
-    };
-
-    template<class GM, class LOSS, class LOSS_GM>
-    EditableDataset<GM, LOSS, LOSS_GM>::EditableDataset(
-        std::vector<GM>& gms,
-        std::vector<GTVector >& gts,
-        std::vector<LossParameterType>& lossParams
-    )
-    :   Dataset<GM, LOSS, LOSS_GM>(gms.size())
-    {
-        for(size_t i=0; i<gms.size(); ++i){
-            setInstance(i, gms[i], gts[i], lossParams[i]);
-            this->buildModelWithLoss(i);
-        }
-    }
-
-
-
-
-
-    template<class GM, class LOSS, class LOSS_GM>
-    void EditableDataset<GM, LOSS, LOSS_GM>::setInstance(
-        const size_t i, 
-        const GM& gm, 
-        const GTVector& gt,
-        const LossParameterType& p
-    ) {
-        OPENGM_CHECK_OP(i, <, this->gms_.size(),"");
-        OPENGM_CHECK_OP(i, <, this->gts_.size(),"");
-        OPENGM_CHECK_OP(i, <, this->lossParams_.size(),"");
-        OPENGM_CHECK_OP(i, <, this->gmsWithLoss_.size(),"");
-        this->gms_[i] = gm;
-        this->gts_[i] = gt;
-        this->lossParams_[i] = p;
-        //std::cout<<"build model with loss\n";
-        this->buildModelWithLoss(i);
-        //std::cout<<"build model with loss DONE\n";
-    }
-
-    template<class GM, class LOSS, class LOSS_GM>
-    inline void EditableDataset<GM, LOSS, LOSS_GM>::setGT(
-        const size_t i, 
-        const GTVector& gt
-    ) {
-        OPENGM_CHECK_OP(i, <, this->gts_.size(),"");
-        this->gts_[i] = gt;
-        this->buildModelWithLoss(i);
-    }
-
-    template<class GM, class LOSS, class LOSS_GM>
-    void EditableDataset<GM, LOSS, LOSS_GM>::pushBackInstance(
-        const GM& gm, 
-        const GTVector& gt, 
-        const LossParameterType& p
-    ) {
-        this->gms_.push_back(gm);
-        this->gts_.push_back(gt);
-        this->lossParams_.push_back(p);
-        this->gmsWithLoss_.resize(this->gts_.size());
-        this->isCached_.resize(this->gts_.size());
-        this->count_.resize(this->gts_.size());
-        this->buildModelWithLoss(this->gts_.size()-1);        
-        OPENGM_CHECK_OP(this->gms_.size(), ==, this->gts_.size(),"");
-        OPENGM_CHECK_OP(this->gms_.size(), ==, this->lossParams_.size(),"");
-        OPENGM_CHECK_OP(this->gms_.size(), ==, this->gmsWithLoss_.size(),"");
-    }
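-
-    // Usage sketch (names hypothetical): grow a dataset instance by instance,
-    // then point it at the learnable weights.
-    //
-    //   EditableDataset<GM, LOSS> ds;
-    //   ds.pushBackInstance(gm, groundTruth);          // default loss parameter
-    //   typename EditableDataset<GM, LOSS>::Weights w(numberOfWeights);
-    //   ds.setWeights(w);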
-
-    template<class GM, class LOSS, class LOSS_GM>
-    inline void EditableDataset<GM, LOSS, LOSS_GM>::setWeights(
-        Weights& w
-    ) {
-        this->weights_ = w;
-    }
-
-    template<class GM, class LOSS, class LOSS_GM>
-    inline void EditableDataset<GM, LOSS, LOSS_GM>::setWeightConstraints(
-        const WeightConstraintsType & weightConstraints
-    ){
-        this->weightConstraints_ = weightConstraints;
-    }
-
-
-} // namespace datasets
-} // namespace opengm
-
-#endif 
diff --git a/include/opengm/learning/dataset/testdatasets.hxx b/include/opengm/learning/dataset/testdatasets.hxx
deleted file mode 100644
index e2f1e85..0000000
--- a/include/opengm/learning/dataset/testdatasets.hxx
+++ /dev/null
@@ -1,375 +0,0 @@
-#pragma once
-#ifndef OPENGM_TESTDATASETS_HXX
-#define OPENGM_TESTDATASETS_HXX
-
-#include <vector>
-#include <cstdlib>
-
-#include <opengm/learning/dataset/dataset.hxx>
-#include <opengm/learning/dataset/editabledataset.hxx>
-#include <opengm/functions/learnable/lpotts.hxx>
-#include <opengm/functions/learnable/lweightedsum_of_functions.hxx>
-
-namespace opengm {
-   namespace datasets{
-
-      template<class GM, class LOSS>
-      class TestDataset0 : public Dataset<GM,LOSS,GM>{ 
-      public:
-         typedef GM                     GMType;
-         typedef GM                     GMWITHLOSS;
-         typedef LOSS                   LossType;
-         typedef typename GM::ValueType ValueType;
-         typedef typename GM::IndexType IndexType;
-         typedef typename GM::LabelType LabelType;
-         typedef opengm::learning::Weights<ValueType> Weights;
-
-         TestDataset0(size_t numModels=5); 
-      };
-
-      template<class GM, class LOSS>
-      class TestDataset1 : public Dataset<GM,LOSS,GM>{ 
-      public:
-         typedef GM                     GMType;
-         typedef GM                     GMWITHLOSS;
-         typedef LOSS                   LossType;
-         typedef typename GM::ValueType ValueType;
-         typedef typename GM::IndexType IndexType;
-         typedef typename GM::LabelType LabelType;
-         typedef opengm::learning::Weights<ValueType> Weights;
-
-         TestDataset1(size_t numModels=5); 
-      };
-
-
-      template<class GM, class LOSS>
-      class TestDataset2 : public Dataset<GM,LOSS,GM>{ 
-      public:
-         typedef GM                     GMType;
-         typedef GM                     GMWITHLOSS;
-         typedef LOSS                   LossType;
-         typedef typename GM::ValueType ValueType;
-         typedef typename GM::IndexType IndexType;
-         typedef typename GM::LabelType LabelType;
-         typedef opengm::learning::Weights<ValueType> Weights;
-
-         TestDataset2(size_t numModels=4); 
-      };
-
-      template<class GM, class LOSS>
-      class TestDatasetSimple : public Dataset<GM,LOSS,GM>{ 
-      public:
-         typedef GM                     GMType;
-         typedef GM                     GMWITHLOSS;
-         typedef LOSS                   LossType;
-         typedef typename GM::ValueType ValueType;
-         typedef typename GM::IndexType IndexType;
-         typedef typename GM::LabelType LabelType;
-         typedef opengm::learning::Weights<ValueType> Weights;
-
-         TestDatasetSimple(size_t numModels=1); 
-      };
-
-      template<class GM, class LOSS>
-      class EditableTestDataset : public EditableDataset<GM,LOSS,GM>{ 
-      public:
-         typedef GM                     GMType;
-         typedef GM                     GMWITHLOSS;
-         typedef LOSS                   LossType;
-         typedef typename GM::ValueType ValueType;
-         typedef typename GM::IndexType IndexType;
-         typedef typename GM::LabelType LabelType;
-         typedef opengm::learning::Weights<ValueType> Weights;
-
-         EditableTestDataset(size_t numModels=5); 
-      };
-
-//***********************************
-//** IMPL TestDataset 0
-//***********************************
-      template<class GM, class LOSS>
-      TestDataset0<GM,LOSS>::TestDataset0(size_t numModels)
-      { 
-         this->lossParams_.resize(numModels);
-         this->isCached_.resize(numModels);
-         this->count_.resize(numModels,0);
-         this->weights_ = Weights(1);
-         LabelType numberOfLabels = 2;
-         this->gts_.resize(numModels,std::vector<LabelType>(64,0));
-         for(size_t m=0;m<numModels;++m){
-            for(size_t i=16; i<48; ++i){
-               this->gts_[m][i] = 1;
-            }
-         }
-         this->gms_.resize(numModels);
-         this->gmsWithLoss_.resize(numModels);
-         for(size_t m=0; m<numModels; ++m){
-            std::srand(m);
-            for (int j = 0; j < 64; j++)
-               this->gms_[m].addVariable(2);
-            for(size_t y = 0; y < 64; ++y){ 
-               // function
-               const size_t shape[] = {numberOfLabels};
-               ExplicitFunction<ValueType> f(shape, shape + 1);
-               ValueType val = (double)(this->gts_[m][y]) + (double)(std::rand()) / (double) (RAND_MAX) * 1.5 - 0.75 ;
-               f(0) = std::fabs(val-0);
-               f(1) = std::fabs(val-1);
-               typename GM::FunctionIdentifier fid =  this->gms_[m].addFunction(f);
-
-               // factor
-               size_t variableIndices[] = {y};
-               this->gms_[m].addFactor(fid, variableIndices, variableIndices + 1);         
-            }
-          
-            opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType> f(this->weights_,2,std::vector<size_t>(1,0),std::vector<ValueType>(1,1));
-            typename GM::FunctionIdentifier fid = this->gms_[m].addFunction(f);      
-            for(size_t y = 0; y < 64; ++y){ 
-               if(y + 1 < 64) { // (x, y) -- (x, y + 1)
-                  size_t variableIndices[] = {y, y+1};
-                  //sort(variableIndices, variableIndices + 2);
-                  this->gms_[m].addFactor(fid, variableIndices, variableIndices + 2);
-               }
-            }
-            this->buildModelWithLoss(m);
-         }      
-      };
-
-//***********************************
-//** IMPL TestDataset 1
-//***********************************
-      template<class GM, class LOSS>
-      TestDataset1<GM,LOSS>::TestDataset1(size_t numModels)
-      { 
-         this->lossParams_.resize(numModels);
-         this->isCached_.resize(numModels);
-         this->count_.resize(numModels,0);
-         this->weights_ = Weights(1);
-         LabelType numberOfLabels = 2;
-         this->gts_.resize(numModels,std::vector<LabelType>(64*64,0));
-         for(size_t m=0;m<numModels;++m){
-            for(size_t i=32*64; i<64*64; ++i){
-               this->gts_[m][i] = 1;
-            }
-         }
-         this->gms_.resize(numModels);
-         this->gmsWithLoss_.resize(numModels);
-         for(size_t m=0; m<numModels; ++m){
-            std::srand(m);
-            for (int j = 0; j < 64*64; j++)
-               this->gms_[m].addVariable(2);
-            for(size_t y = 0; y < 64; ++y){ 
-               for(size_t x = 0; x < 64; ++x) {
-                  // function
-                  const size_t shape[] = {numberOfLabels};
-                  ExplicitFunction<ValueType> f(shape, shape + 1);
-                  ValueType val = (double)(this->gts_[m][y*64+x]) + (double)(std::rand()) / (double) (RAND_MAX) * 1.5 - 0.75 ;
-                  f(0) = std::fabs(val-0);
-                  f(1) = std::fabs(val-1);
-                  typename GM::FunctionIdentifier fid =  this->gms_[m].addFunction(f);
-
-                  // factor
-                  size_t variableIndices[] = {y*64+x};
-                  this->gms_[m].addFactor(fid, variableIndices, variableIndices + 1);
-               }
-            }
-          
-            opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType> f(this->weights_,2,std::vector<size_t>(1,0),std::vector<ValueType>(1,1));
-            typename GM::FunctionIdentifier fid = this->gms_[m].addFunction(f);      
-            for(size_t y = 0; y < 64; ++y){ 
-               for(size_t x = 0; x < 64; ++x) {
-                  if(x + 1 < 64) { // (x, y) -- (x + 1, y)
-                     size_t variableIndices[] = {y*64+x, y*64+x+1};
-                     //sort(variableIndices, variableIndices + 2);
-                     this->gms_[m].addFactor(fid, variableIndices, variableIndices + 2);
-                  }
-                  if(y + 1 < 64) { // (x, y) -- (x, y + 1)
-                     size_t variableIndices[] = {y*64+x, (y+1)*64+x};
-                     //sort(variableIndices, variableIndices + 2);
-                     this->gms_[m].addFactor(fid, variableIndices, variableIndices + 2);
-                  }
-               }    
-            }
-            this->buildModelWithLoss(m);
-         }      
-      };
-
-//***********************************
-//** IMPL TestDataset 2
-//***********************************
-      template<class GM, class LOSS>
-      TestDataset2<GM,LOSS>::TestDataset2(size_t numModels)
-      { 
-         this->lossParams_.resize(numModels);
-         this->isCached_.resize(numModels);
-         this->count_.resize(numModels,0);
-         this->weights_ = Weights(3);
-         LabelType numberOfLabels = 2;
-         this->gts_.resize(numModels,std::vector<LabelType>(64*64,0));
-         for(size_t m=0;m<numModels;++m){
-            for(size_t i=32*64; i<64*64; ++i){
-               this->gts_[m][i] = 1;
-            }
-         }
-         this->gms_.resize(numModels);
-         this->gmsWithLoss_.resize(numModels);
-         for(size_t m=0; m<numModels; ++m){
-            std::srand(m);
-            for (int j = 0; j < 64*64; j++)
-               this->gms_[m].addVariable(2);
-            for(size_t y = 0; y < 64; ++y){ 
-               for(size_t x = 0; x < 64; ++x) {
-                  // function
-                  const size_t numExperts = 2;
-                  const std::vector<size_t> shape(1,numberOfLabels);
-                  std::vector<marray::Marray<ValueType> > feat(numExperts,marray::Marray<ValueType>(shape.begin(), shape.end()));
-                  ValueType val0 = (double)(this->gts_[m][y*64+x]) + (double)(std::rand()) / (double) (RAND_MAX) * 1.0 - 0.5 ;
-                  feat[0](0) = std::fabs(val0-0);
-                  feat[0](1) = std::fabs(val0-1); 
-                  ValueType val1 = (double)(this->gts_[m][y*64+x]) + (double)(std::rand()) / (double) (RAND_MAX) * 2.0 - 1.0 ;
-                  feat[1](0) = std::fabs(val1-0);
-                  feat[1](1) = std::fabs(val1-1);
-                  std::vector<size_t> wID(2);
-                  wID[0]=1;  wID[1]=2;
-                  opengm::functions::learnable::LWeightedSumOfFunctions<ValueType,IndexType,LabelType> f(shape,this->weights_, wID, feat);
-                  typename GM::FunctionIdentifier fid =  this->gms_[m].addFunction(f);
-
-                  // factor
-                  size_t variableIndices[] = {y*64+x};
-                  this->gms_[m].addFactor(fid, variableIndices, variableIndices + 1);
-               }
-            }
-          
-            opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType> f(this->weights_,2,std::vector<size_t>(1,0),std::vector<ValueType>(1,1));
-            typename GM::FunctionIdentifier fid = this->gms_[m].addFunction(f);      
-            for(size_t y = 0; y < 64; ++y){ 
-               for(size_t x = 0; x < 64; ++x) {
-                  if(x + 1 < 64) { // (x, y) -- (x + 1, y)
-                     size_t variableIndices[] = {y*64+x, y*64+x+1};
-                     //sort(variableIndices, variableIndices + 2);
-                     this->gms_[m].addFactor(fid, variableIndices, variableIndices + 2);
-                  }
-                  if(y + 1 < 64) { // (x, y) -- (x, y + 1)
-                     size_t variableIndices[] = {y*64+x, (y+1)*64+x};
-                     //sort(variableIndices, variableIndices + 2);
-                     this->gms_[m].addFactor(fid, variableIndices, variableIndices + 2);
-                  }
-               }    
-            }
-            this->buildModelWithLoss(m);
-         }
-      };
-
-//***********************************
-//** Embarrassingly simple dataset
-//***********************************
-      template<class GM, class LOSS>
-      TestDatasetSimple<GM,LOSS>::TestDatasetSimple(size_t numModels)
-      { 
-         this->lossParams_.resize(numModels);
-         this->isCached_.resize(numModels);
-         this->count_.resize(numModels,0);
-         this->weights_ = Weights(2);
-         LabelType numberOfLabels = 2;
-         this->gts_.resize(numModels,std::vector<LabelType>(1,0));
-         for(size_t m=0; m<numModels; ++m){
-            this->gts_[m][0] = 0;
-         }
-         this->gms_.resize(numModels);
-         this->gmsWithLoss_.resize(numModels);
-         for(size_t m=0; m<numModels; ++m){
-            std::srand(m);
-            this->gms_[m].addVariable(2);
-
-            // function
-            const size_t numExperts = 2;
-            const std::vector<size_t> shape(1,numberOfLabels);
-            std::vector<marray::Marray<ValueType> > feat(numExperts,marray::Marray<ValueType>(shape.begin(), shape.end()));
-            ValueType val0 = 0.5;
-            feat[0](0) = val0;
-            feat[0](1) = val0-1; 
-            ValueType val1 = -0.25;
-            feat[1](0) = val1;
-            feat[1](1) = val1-1;
-            std::vector<size_t> wID(2);
-            wID[0]=0;  wID[1]=1;
-            opengm::functions::learnable::LWeightedSumOfFunctions<ValueType,IndexType,LabelType> f(shape,this->weights_, wID, feat);
-            typename GM::FunctionIdentifier fid =  this->gms_[m].addFunction(f);
-
-            // factor
-            size_t variableIndices[] = {0};
-            this->gms_[m].addFactor(fid, variableIndices, variableIndices + 1);
-
-            this->buildModelWithLoss(m);
-         }
-      };
- 
-//***********************************
-//** IMPL TestDataset 2 (editable)
-//***********************************
-      template<class GM, class LOSS>
-      EditableTestDataset<GM,LOSS>::EditableTestDataset(size_t numModels)
-      { 
-         this->lossParams_.resize(numModels);
-         this->isCached_.resize(numModels);
-         this->count_.resize(numModels,0);
-         this->weights_ = Weights(3);
-         LabelType numberOfLabels = 2;
-         this->gts_.resize(numModels,std::vector<size_t>(64*64,0));
-         for(size_t m=0;m<numModels;++m){
-            for(size_t i=32*64; i<64*64; ++i){
-               this->gts_[m][i] = 1;
-            }
-         }
-         this->gms_.resize(numModels);
-         this->gmsWithLoss_.resize(numModels);
-         for(size_t m=0; m<numModels; ++m){
-            std::srand(m);
-            for (int j = 0; j < 64*64; j++)
-               this->gms_[m].addVariable(2);
-            for(size_t y = 0; y < 64; ++y){ 
-               for(size_t x = 0; x < 64; ++x) {
-                  // function
-                  const size_t numExperts = 2;
-                  const std::vector<size_t> shape(1,numberOfLabels);
-                  std::vector<marray::Marray<ValueType> > feat(numExperts,marray::Marray<ValueType>(shape.begin(), shape.end()));
-                  ValueType val0 = (double)(this->gts_[m][y*64+x]) + (double)(std::rand()) / (double) (RAND_MAX) * 1.0 - 0.5 ;
-                  feat[0](0) = std::fabs(val0-0);
-                  feat[0](1) = std::fabs(val0-1); 
-                  ValueType val1 = (double)(this->gts_[m][y*64+x]) + (double)(std::rand()) / (double) (RAND_MAX) * 2.0 - 1.0 ;
-                  feat[1](0) = std::fabs(val1-0);
-                  feat[1](1) = std::fabs(val1-1);
-                  std::vector<size_t> wID(2);
-                  wID[0]=1;  wID[1]=2;
-                  opengm::functions::learnable::LWeightedSumOfFunctions<ValueType,IndexType,LabelType> f(shape,this->weights_, wID, feat);
-                  typename GM::FunctionIdentifier fid =  this->gms_[m].addFunction(f);
-
-                  // factor
-                  size_t variableIndices[] = {y*64+x};
-                  this->gms_[m].addFactor(fid, variableIndices, variableIndices + 1);
-               }
-            }
-          
-            opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType> f(this->weights_,2,std::vector<size_t>(1,0),std::vector<ValueType>(1,1));
-            typename GM::FunctionIdentifier fid = this->gms_[m].addFunction(f);      
-            for(size_t y = 0; y < 64; ++y){ 
-               for(size_t x = 0; x < 64; ++x) {
-                  if(x + 1 < 64) { // (x, y) -- (x + 1, y)
-                     size_t variableIndices[] = {y*64+x, y*64+x+1};
-                     //sort(variableIndices, variableIndices + 2);
-                     this->gms_[m].addFactor(fid, variableIndices, variableIndices + 2);
-                  }
-                  if(y + 1 < 64) { // (x, y) -- (x, y + 1)
-                     size_t variableIndices[] = {y*64+x, (y+1)*64+x};
-                     //sort(variableIndices, variableIndices + 2);
-                     this->gms_[m].addFactor(fid, variableIndices, variableIndices + 2);
-                  }
-               }    
-            }
-            this->buildModelWithLoss(m);
-         }
-      };
-
-
-   }
-} // namespace opengm
-
-#endif 
diff --git a/include/opengm/learning/gradient-accumulator.hxx b/include/opengm/learning/gradient-accumulator.hxx
deleted file mode 100644
index 88e920a..0000000
--- a/include/opengm/learning/gradient-accumulator.hxx
+++ /dev/null
@@ -1,175 +0,0 @@
-#ifndef OPENGM_LEARNING_GRADIENT_ACCUMULATOR_H__
-#define OPENGM_LEARNING_GRADIENT_ACCUMULATOR_H__
-
-namespace opengm {
-namespace learning {
-
-/**
- * Model function visitor to accumulate the gradient for each model weight, 
- * given a configuration.
- */
-template <typename ModelWeights, typename ConfigurationType>
-class GradientAccumulator {
-    typedef typename ConfigurationType::const_iterator ConfIter;
-
-public:
-
-    /**
-     * How to accumulate the gradient on the provided ModelWeights.
-     */
-    enum Mode {
-
-        Add,
-
-        Subtract
-    };
-
-    /**
-     * @param gradient
-     *              ModelWeights reference to store the gradients. Gradient 
-     *              values will only be added (or subtracted, if mode == 
-     *              Subtract), so you have to make sure gradient is properly 
-     *              initialized to zero.
-     *
-     * @param configuration
-     *              Configuration of the variables in the model, to evaluate the 
-     *              gradient for.
-     *
-     * @param mode
-     *              Add or Subtract the weight gradients from gradient.
-     */
-    GradientAccumulator(ModelWeights& gradient, const ConfigurationType& configuration, Mode mode = Add) :
-        _gradient(gradient),
-        _configuration(configuration),
-        _mode(mode) {}
-
-    template <typename Iterator, typename FunctionType>
-    void operator()(Iterator begin, Iterator end, const FunctionType& function) {
-
-        typedef opengm::SubsetAccessor<Iterator, ConfIter> Accessor;
-        typedef opengm::AccessorIterator<Accessor, true> Iter;
-        const Accessor accessor(begin, end, _configuration.begin());
-
-        for (int i = 0; i < function.numberOfWeights(); i++) {
-
-            int index = function.weightIndex(i);
-            double g = function.weightGradient(i, Iter(accessor, 0));
-            if (_mode == Add)
-                _gradient[index] += g;
-            else
-                _gradient[index] -= g;
-        }
-    }
-
-private:
-
-    ModelWeights& _gradient;
-    const ConfigurationType& _configuration;
-    Mode _mode;
-};
-
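-// Usage sketch (names hypothetical; the factor entry point is assumed to be
-// callViFunctor, as FeatureAccumulator below uses): accumulate dE/dw of one
-// labeling into a zero-initialized weight vector.
-//
-//   ModelWeights grad(nWeights);                  // must start at zero
-//   std::vector<LabelType> labeling = ...;        // one label per variable
-//   GradientAccumulator<ModelWeights, std::vector<LabelType> >
-//       acc(grad, labeling);                      // Mode defaults to Add
-//   for (size_t fi = 0; fi < gm.numberOfFactors(); ++fi)
-//       gm[fi].callViFunctor(acc);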
-
-template<class GM, class LABEL_ITER>
-struct FeatureAccumulator{
-
-    typedef typename GM::LabelType LabelType;
-    typedef typename GM::IndexType IndexType;
-    typedef typename GM::ValueType ValueType;
-    
-
-
-    FeatureAccumulator(const size_t nW, bool add = true)
-    :   accWeights_(nW),
-        gtLabel_(),
-        mapLabel_(),
-        add_(add),
-        weight_(1.0)
-        {
-
-        for(size_t i=0; i<accWeights_.size(); ++i){
-            accWeights_[i] = 0.0;
-        }
-    }
-
-    void setLabels(const LABEL_ITER gtLabel, const LABEL_ITER mapLabel){
-        gtLabel_ = gtLabel;
-        mapLabel_  = mapLabel;
-    }
-
-    void resetWeights(){
-        for(size_t i=0; i<accWeights_.size(); ++i){
-            accWeights_[i] = 0.0;
-        }
-    }
-    const Weights<double> & getWeights()const{
-        return accWeights_;
-    }
-    double getWeight(const size_t wi)const{
-        return accWeights_[wi];
-    }
-    template<class Iter, class F>
-    void operator()(Iter begin, Iter end, const F & f){
-
-        typedef opengm::SubsetAccessor<Iter, LABEL_ITER> Accessor;
-        typedef opengm::AccessorIterator<Accessor, true> AccessorIter;
-
-        // get the number of weights
-        const size_t nWeights = f.numberOfWeights();
-        if(nWeights>0){
-            // loop over all weights
-            for(size_t wi=0; wi<nWeights; ++wi){
-                // accumulate features for both labelings
-                const size_t gwi = f.weightIndex(wi);
-
-
-                const Accessor accessorGt(begin, end, gtLabel_);
-                const Accessor accessorMap(begin, end, mapLabel_);
-
-                if(add_){
-                    // for gt label
-                    accWeights_[gwi] += weight_*f.weightGradient(wi, AccessorIter(accessorGt, 0));
-                    // for test label
-                    accWeights_[gwi] -= weight_*f.weightGradient(wi, AccessorIter(accessorMap, 0));
-                }
-                else{
-                    // for gt label
-                    accWeights_[gwi] -= weight_*f.weightGradient(wi, AccessorIter(accessorGt, 0));
-                    // for test label
-                    accWeights_[gwi] += weight_*f.weightGradient(wi, AccessorIter(accessorMap, 0));
-                }
-            }
-        }
-    }
-
-    void accumulateFromOther(const FeatureAccumulator & otherAcc){
-        for(size_t i=0; i<accWeights_.size(); ++i){
-            accWeights_[i] += otherAcc.accWeights_[i];
-        }
-    }
-
-    void accumulateModelFeatures(
-        const GM & gm, 
-        const LABEL_ITER & gtLabel,
-        const LABEL_ITER & mapLabel,
-        const double weight  = 1.0
-    ){
-        gtLabel_ = gtLabel;
-        mapLabel_  = mapLabel;
-        weight_ = weight;
-        // iterate over all factors
-        // and accumulate features
-        for(size_t fi=0; fi<gm.numberOfFactors(); ++fi){
-            gm[fi].callViFunctor(*this);
-        }
-    }
-    opengm::learning::Weights<double>  accWeights_;
-    LABEL_ITER gtLabel_;
-    LABEL_ITER mapLabel_;
-    bool add_;
-    double weight_;
-};
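-
-// Reading aid: with add_ == true, accumulateModelFeatures sums, over all
-// factors, weight_ * (dE/dw at the ground truth  -  dE/dw at the MAP labeling),
-// i.e. the feature difference phi(gt) - phi(map) that drives structured
-// perceptron / subgradient updates; add_ == false flips the sign.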
-
-}} // namespace opengm::learning
-
-#endif // OPENGM_LEARNING_GRADIENT_ACCUMULATOR_H__
-
diff --git a/include/opengm/learning/gridsearch-learning.hxx b/include/opengm/learning/gridsearch-learning.hxx
deleted file mode 100644
index 750c844..0000000
--- a/include/opengm/learning/gridsearch-learning.hxx
+++ /dev/null
@@ -1,126 +0,0 @@
-#pragma once
-#ifndef OPENGM_GRIDSEARCH_LEARNER_HXX
-#define OPENGM_GRIDSEARCH_LEARNER_HXX
-
-#include <vector>
-
-namespace opengm {
-   namespace learning {
-
-      
-      template<class DATASET>
-      class GridSearchLearner
-      {
-      public: 
-         typedef DATASET DatasetType;
-         typedef typename DATASET::GMType   GMType; 
-         typedef typename DATASET::LossType LossType;
-         typedef typename GMType::ValueType ValueType;
-         typedef typename GMType::IndexType IndexType;
-         typedef typename GMType::LabelType LabelType; 
-
-         class Parameter{
-         public:
-            std::vector<double> parameterUpperbound_; 
-            std::vector<double> parameterLowerbound_;
-            std::vector<size_t> testingPoints_;
-            Parameter(){}
-         };
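-
-         // Usage sketch (values hypothetical): search every weight on a
-         // 5-point grid over [-1, 1].
-         //
-         //   GridSearchLearner<Dataset>::Parameter p;
-         //   p.parameterLowerbound_.resize(nWeights, -1.0);
-         //   p.parameterUpperbound_.resize(nWeights,  1.0);
-         //   p.testingPoints_.resize(nWeights, 5);
-         //   GridSearchLearner<Dataset> learner(ds, p);
-         //   learner.learn<Inf>(infPara);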
-
-
-         GridSearchLearner(DATASET&, const Parameter& );
-
-         template<class INF>
-         void learn(const typename INF::Parameter& para); 
-         //template<class INF, class VISITOR>
-         //void learn(typename INF::Parameter para, VISITOR vis);
-
-         const opengm::learning::Weights<double>& getWeights(){return weights_;}
-         Parameter& getLearningParameters(){return para_;}
-
-      private:
-         DATASET& dataset_;
-         opengm::learning::Weights<double> weights_;
-         Parameter para_;
-      }; 
-
-      template<class DATASET>
-      GridSearchLearner<DATASET>::GridSearchLearner(DATASET& ds, const Parameter& p )
-         : dataset_(ds), para_(p)
-      {
-         weights_ = opengm::learning::Weights<double>(ds.getNumberOfWeights());
-         if(para_.parameterUpperbound_.size() != ds.getNumberOfWeights())
-            para_.parameterUpperbound_.resize(ds.getNumberOfWeights(),10.0);
-         if(para_.parameterLowerbound_.size() != ds.getNumberOfWeights())
-            para_.parameterLowerbound_.resize(ds.getNumberOfWeights(),0.0);
-         if(para_.testingPoints_.size() != ds.getNumberOfWeights())
-            para_.testingPoints_.resize(ds.getNumberOfWeights(),10);
-      }
-
-
-      template<class DATASET>
-      template<class INF>
-      void GridSearchLearner<DATASET>::learn(const typename INF::Parameter& para){
-         // generate model Parameters
-         opengm::learning::Weights<double> modelPara( dataset_.getNumberOfWeights() );
-         opengm::learning::Weights<double> bestModelPara( dataset_.getNumberOfWeights() );
-         double bestLoss = std::numeric_limits<double>::infinity();
-         std::vector<size_t> itC(dataset_.getNumberOfWeights(),0);
-         
-         bool search=true;
-         while(search){
-            // Get Parameter
-            for(size_t p=0; p<dataset_.getNumberOfWeights(); ++p){
-               modelPara.setWeight(p, para_.parameterLowerbound_[p] + double(itC[p])/double(para_.testingPoints_[p]-1)*(para_.parameterUpperbound_[p]-para_.parameterLowerbound_[p]) );
-            }
-            // Evaluate Loss
-            opengm::learning::Weights<double>& mp =  dataset_.getWeights();
-            mp = modelPara;
-            const double loss = dataset_. template getTotalLoss<INF>(para);
-           
-
-
-            if(loss<bestLoss){
-                 // *call visitor*
-                for(size_t p=0; p<dataset_.getNumberOfWeights(); ++p){
-                   std::cout << modelPara[p] <<" ";
-                }
-                std::cout << " ==> ";
-                std::cout << loss << std::endl;
-
-                bestLoss=loss;
-                bestModelPara=modelPara;
-                if(loss<=0.000000001){
-                    search = false;
-                }
-            }
-            //Increment Parameter
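-            // (itC works like a mixed-radix odometer over the grid: bump the
-            // first weight that still has testing points left, reset the ones
-            // before it, and stop once the last digit would overflow.)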
-            for(size_t p=0; p<dataset_.getNumberOfWeights(); ++p){
-               if(itC[p]<para_.testingPoints_[p]-1){
-                  ++itC[p];
-                  break;
-               }
-               else{
-                  itC[p]=0;
-                  if (p==dataset_.getNumberOfWeights()-1)
-                     search = false; 
-               }             
-            }
-         }
-         std::cout << "Best"<<std::endl;
-         for(size_t p=0; p<dataset_.getNumberOfWeights(); ++p){
-            std::cout << bestModelPara[p] <<" ";
-         }
-         std::cout << " ==> ";
-         std::cout << bestLoss << std::endl;
-         weights_ = bestModelPara;
-
-         // save best weights in dataset
-         for(size_t p=0; p<dataset_.getNumberOfWeights(); ++p){
-            dataset_.getWeights().setWeight(p, weights_[p]);
-         }
-      };
-   }
-}
-#endif
diff --git a/include/opengm/learning/loss/flexibleloss.hxx b/include/opengm/learning/loss/flexibleloss.hxx
deleted file mode 100644
index bad9466..0000000
--- a/include/opengm/learning/loss/flexibleloss.hxx
+++ /dev/null
@@ -1,305 +0,0 @@
-#pragma once
-#ifndef OPENGM_FLEXIBLE_LOSS_HXX
-#define OPENGM_FLEXIBLE_LOSS_HXX
-
-#include "opengm/functions/explicit_function.hxx"
-#include "opengm/graphicalmodel/graphicalmodel_hdf5.hxx"
-#include "hdf5.h"
-
-namespace opengm {
-namespace learning {
-
-/**
- * FlexibleLoss bundles several losses behind a single parameter object; the
- * loss type (Hamming, L1, L2, Partition or ConfMat) is selected via
- * Parameter::lossType_. Per-node, per-label and per-factor multipliers
- * rescale the penalties and default to 1 where not supplied.
- **/
-class FlexibleLoss{
-public:
-    class Parameter{
-    public:
-
-        enum LossType{
-            Hamming = 0 ,
-            L1 = 1,
-            L2 = 2,
-            Partition = 3,
-            ConfMat = 4
-        };
-
-        Parameter(){
-            lossType_ = Hamming;
-        }
-
-
-        bool operator==(const Parameter & other) const{
-            throw opengm::RuntimeError("do not call me");
-        }
-        bool operator<(const Parameter & other) const{
-            throw opengm::RuntimeError("do not call me");
-        }
-        bool operator>(const Parameter & other) const{
-            throw opengm::RuntimeError("do not call me");
-        }
-        double getNodeLossMultiplier(const size_t i) const;
-        double getLabelLossMultiplier(const size_t i) const;
-        double getFactorLossMultiplier(const size_t i) const;
-        double getLabelConfMatMultiplier(const size_t l, const size_t lgt)const;
-        /**
-         * serializes the parameter object to the given hdf5 group handle;
-         * the loss id is written to a dataset "lossId" and the loss type to
-         * a dataset "lossType" (as an integer code, see LossType)
-         **/
-        void save(hid_t& groupHandle) const;
-        void load(const hid_t& groupHandle);
-        static std::size_t getLossId() { return lossId_; }
-
-        LossType lossType_;
-        std::vector<double>     nodeLossMultiplier_;
-        std::vector<double>     labelLossMultiplier_;
-        std::vector<double>     factorMultipier_;
-        marray::Marray<double>  confMat_;
-        
-
-
-    private:
-        static const std::size_t lossId_ = 16006;
-
-    };
-
-
-public:
-    FlexibleLoss(const Parameter& param = Parameter()) : param_(param){}
-
-    template<class GM, class IT1, class IT2>
-            double loss(const GM & gm, IT1 labelBegin, IT1 labelEnd, IT2 GTBegin,IT2 GTEnd) const;
-
-    template<class GM, class IT>
-    void addLoss(GM& gm, IT GTBegin) const;
-
-private:
-    Parameter param_;
-};
-
-inline double FlexibleLoss::Parameter::getNodeLossMultiplier(const size_t i) const {
-    if(i >= this->nodeLossMultiplier_.size()) {
-        return 1.;
-    }
-    return this->nodeLossMultiplier_[i];
-}
-
-inline double FlexibleLoss::Parameter::getFactorLossMultiplier(const size_t i) const {
-    if(i >= this->factorMultipier_.size()) {
-        return 1.;
-    }
-    return this->factorMultipier_[i];
-}
-
-inline double FlexibleLoss::Parameter::getLabelLossMultiplier(const size_t i) const {
-    if(i >= this->labelLossMultiplier_.size()) {
-        return 1.;
-    }
-    return this->labelLossMultiplier_[i];
-}
-
-inline double FlexibleLoss::Parameter::getLabelConfMatMultiplier(const size_t l, const size_t lgt)const{
-    if(l<confMat_.shape(0) && lgt<confMat_.shape(1)){
-        return confMat_(l, lgt);
-    }
-    return 1.0;
-}
-
-inline void FlexibleLoss::Parameter::save(hid_t& groupHandle) const {
-    std::vector<std::size_t> name;
-    name.push_back(this->getLossId());
-    marray::hdf5::save(groupHandle,"lossId",name);
-
-
-    std::vector<size_t> lossType(1, size_t(lossType_));
-    marray::hdf5::save(groupHandle,"lossType",lossType);
-
-    if (this->factorMultipier_.size() > 0) {
-        marray::hdf5::save(groupHandle,"factorLossMultiplier",this->factorMultipier_);
-    }
-    if (this->nodeLossMultiplier_.size() > 0) {
-        marray::hdf5::save(groupHandle,"nodeLossMultiplier",this->nodeLossMultiplier_);
-    }
-    if (this->labelLossMultiplier_.size() > 0) {
-        marray::hdf5::save(groupHandle,"labelLossMultiplier",this->labelLossMultiplier_);
-    }
-}
-
-inline void FlexibleLoss::Parameter::load(const hid_t& groupHandle) {
-
-    std::cout<<"load loss type \n";
-    std::vector<size_t> lossType;
-    marray::hdf5::loadVec(groupHandle, "lossType", lossType);
-    if(lossType[0] == size_t(Hamming)){
-        lossType_ = Hamming;
-    }
-    else if(lossType[0] == size_t(L1)){
-        lossType_ = L1;
-    }
-    else if(lossType[0] == size_t(L2)){
-        lossType_ = L2;
-    }
-    else if(lossType[0] == size_t(Partition)){
-        lossType_ = Partition;
-    }
-    else if(lossType[0] == size_t(ConfMat)){
-        lossType_ = ConfMat;
-    }
-
-    
-    if (H5Lexists(groupHandle, "nodeLossMultiplier", H5P_DEFAULT)) {
-        marray::hdf5::loadVec(groupHandle, "nodeLossMultiplier", this->nodeLossMultiplier_);
-    } 
-    else {
-        //std::cout << "nodeLossMultiplier of FlexibleLoss not found, setting default values" << std::endl;
-    }
-
-    //std::cout<<"load factorLossMultiplier \n";
-    if (H5Lexists(groupHandle, "factorLossMultiplier", H5P_DEFAULT)  ) {
-        marray::hdf5::loadVec(groupHandle, "factorLossMultiplier", this->factorMultipier_);
-    } 
-    else {
-        //std::cout << "factorLossMultiplier of FlexibleLoss not found, setting default values" << std::endl;
-    }
-
-    //std::cout<<"load labelLossMultiplier \n";
-    if (H5Lexists(groupHandle, "labelLossMultiplier", H5P_DEFAULT) ) {
-        marray::hdf5::loadVec(groupHandle, "labelLossMultiplier", this->labelLossMultiplier_);
-    } 
-    else {
-        //std::cout << "labelLossMultiplier of FlexibleLoss not found, setting default values" << std::endl;
-    }
-}
-
-template<class GM, class IT1, class IT2>
-double FlexibleLoss::loss(const GM & gm, IT1 labelBegin, const IT1 labelEnd, IT2 GTBegin, const IT2 GTEnd) const
-{
-    typedef typename  GM::LabelType LabelType;
-    typedef typename  GM::IndexType IndexType;
-    typedef typename  GM::ValueType ValueType;
-
-    double loss = 0.0;
-    size_t nodeIndex = 0;
-    if(param_.lossType_ == Parameter::Hamming){
-        for(; labelBegin!= labelEnd; ++labelBegin, ++GTBegin, ++nodeIndex){
-            if(*labelBegin != *GTBegin){            
-                loss += param_.getNodeLossMultiplier(nodeIndex) * param_.getLabelLossMultiplier(*labelBegin);
-            }
-        }
-    }
-    else if(param_.lossType_ == Parameter::L1 || param_.lossType_ == Parameter::L2){
-        const size_t norm = param_.lossType_ == Parameter::L1 ? 1 : 2;
-        for(; labelBegin!= labelEnd; ++labelBegin, ++GTBegin, ++nodeIndex){
-            if(*labelBegin != *GTBegin){            
-                // cast before subtracting: LabelType may be unsigned
-                loss += param_.getNodeLossMultiplier(nodeIndex) * std::pow(std::fabs(double(*GTBegin) - double(*labelBegin)), double(norm));
-            }
-        }
-    }
-    else if(param_.lossType_ == Parameter::ConfMat){
-        throw opengm::RuntimeError("ConfMat Loss is not yet implemented");
-    }
-    else if(param_.lossType_ == Parameter::Partition){
-
-        const size_t nFac = gm.numberOfFactors();
-
-        for(size_t fi=0; fi<nFac; ++fi){
-            const size_t nVar = gm[fi].numberOfVariables();
-            OPENGM_CHECK_OP(nVar,==,2,"Partition / Multicut loss is only allowed if the graphical model has only"
-                                      " second order factors (this might be changed in the future)");
-            const IndexType vis[2] = { gm[fi].variableIndex(0), gm[fi].variableIndex(1)};
-            const double facVal = param_.getFactorLossMultiplier(fi);
-            // in the gt they are in the same cluster
-            if( (GTBegin[vis[0]] == GTBegin[vis[1]]) !=
-                (labelBegin[vis[0]] == labelBegin[vis[1]])  ){
-                loss +=facVal;
-            }
-        }
-    }
-    else{
-        throw opengm::RuntimeError("INTERNAL ERROR: unknown Loss Type");
-    }
-    return loss;
-}
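-
-// Worked example (plain Hamming, all multipliers left at their default of 1):
-// labels = {0, 1, 1, 0} against gt = {0, 1, 0, 1} gives two mismatches,
-// hence loss = 2.0.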
-
-template<class GM, class IT>
-void FlexibleLoss::addLoss(GM& gm, IT gt) const
-{
-    typedef typename  GM::LabelType LabelType;
-    typedef typename  GM::IndexType IndexType;
-    typedef typename  GM::ValueType ValueType;
-    typedef opengm::ExplicitFunction<ValueType, IndexType,  LabelType>  ExplicitFunction;
-    typedef opengm::PottsFunction<ValueType, IndexType,  LabelType>  Potts;
-
-    if(param_.lossType_ == Parameter::Hamming){
-        for(IndexType i=0; i<gm.numberOfVariables(); ++i){
-            LabelType numL = gm.numberOfLabels(i);
-            ExplicitFunction f(&numL, &numL+1, 0);
-            for(LabelType l = 0; l < numL; ++l){
-                f(l) = - param_.getNodeLossMultiplier(i) * param_.getLabelLossMultiplier(l);
-            }
-            f(*gt) = 0;
-            ++gt;
-            gm.addFactor(gm.addFunction(f), &i, &i+1);     
-        }
-    }
-    else if(param_.lossType_ == Parameter::L1 || param_.lossType_ == Parameter::L2){
-        const size_t norm = param_.lossType_ == Parameter::L1 ? 1 : 2;
-        for(IndexType i=0; i<gm.numberOfVariables(); ++i){
-            LabelType numL = gm.numberOfLabels(i);
-            ExplicitFunction f(&numL, &numL+1, 0);
-            const LabelType gtL = *gt;
-            for(LabelType l = 0; l < numL; ++l){
-                // cast before subtracting: LabelType may be unsigned
-                f(l) = - param_.getNodeLossMultiplier(i) * std::pow(std::fabs(double(gtL) - double(l)), double(norm));
-            }
-            f(*gt) = 0;
-            ++gt;
-            gm.addFactor(gm.addFunction(f), &i, &i+1);     
-        }
-    }
-    else if(param_.lossType_ == Parameter::ConfMat){
-        throw opengm::RuntimeError("ConfMat Loss is not yet implemented");
-    }
-    else if(param_.lossType_ == Parameter::Partition){
-
-        const size_t nFactorsInit = gm.numberOfFactors();
-
-        for(size_t fi=0; fi<nFactorsInit; ++fi){
-            const size_t nVar = gm[fi].numberOfVariables();
-            OPENGM_CHECK_OP(nVar,==,2,"Partition / Multicut loss is only allowed if the graphical model has only"
-                                      " second order factors (this might be changed in the future)");
-
-            const IndexType vis[2] = { gm[fi].variableIndex(0), gm[fi].variableIndex(1)};
-            const LabelType nl[2]  = { gm.numberOfLabels(vis[0]), gm.numberOfLabels(vis[1])};
-
-            const double facVal = param_.getFactorLossMultiplier(fi);
-
-            // in the gt they are in the same cluster
-            if(gt[vis[0]] == gt[vis[1]]){
-                Potts pf(nl[0],nl[1], 0.0, -1.0*facVal);
-                gm.addFactor(gm.addFunction(pf), vis,vis+2);
-            }
-            // in the gt they are in different clusters
-            else{
-                Potts pf(nl[0],nl[1], -1.0*facVal, 0.0);
-                gm.addFactor(gm.addFunction(pf), vis,vis+2);
-            }
-        }
-    }
-    else{
-        throw opengm::RuntimeError("INTERNAL ERROR: unknown Loss Type");
-    }
-}
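-
-// Sign convention: the unaries added above store the *negative* loss
-// (f(l) = -multiplier, f(gt) = 0), and the Partition case adds negative
-// Potts terms, so a minimizer of the augmented model performs
-// loss-augmented inference, argmin_y E(y) - loss(y, gt).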
-
-} // namespace learning
-} // namespace opengm
-
-#endif 
diff --git a/include/opengm/learning/loss/generalized-hammingloss.hxx b/include/opengm/learning/loss/generalized-hammingloss.hxx
deleted file mode 100644
index e19d945..0000000
--- a/include/opengm/learning/loss/generalized-hammingloss.hxx
+++ /dev/null
@@ -1,152 +0,0 @@
-#pragma once
-#ifndef OPENGM_GENERALIZED_HAMMING_LOSS_HXX
-#define OPENGM_GENERALIZED_HAMMING_LOSS_HXX
-
-#include "opengm/functions/explicit_function.hxx"
-#include "opengm/graphicalmodel/graphicalmodel_hdf5.hxx"
-#include "hdf5.h"
-
-namespace opengm {
-namespace learning {
-
-/**
- * The generalized Hamming Loss incurs a penalty of nodeLossMultiplier[n] * labelLossMultiplier[l]
- * for node n taking label l; if l equals the ground-truth label, the penalty is zero.
- * One can picture the overall cost matrix as the outer product nodeLossMultiplier * labelLossMultiplier,
- * with zeros where the node label equals the ground truth.
- **/
-class GeneralizedHammingLoss{
-public:
-    class Parameter{
-    public:
-        double getNodeLossMultiplier(const size_t i) const;
-        double getLabelLossMultiplier(const size_t i) const;
-
-
-        bool operator==(const Parameter & other) const{
-                return nodeLossMultiplier_ == other.nodeLossMultiplier_
-                    && labelLossMultiplier_ == other.labelLossMultiplier_;
-        }
-        bool operator<(const Parameter & other) const{
-                return nodeLossMultiplier_ < other.nodeLossMultiplier_;
-        }
-        bool operator>(const Parameter & other) const{
-                return nodeLossMultiplier_ > other.nodeLossMultiplier_;
-        }
-
-        /**
-         * serializes the parameter object to the given hdf5 group handle;
-         * the loss id is written to a dataset "lossId", the multiplier
-         * vectors to "nodeLossMultiplier" and "labelLossMultiplier"
-         **/
-        void save(hid_t& groupHandle) const;
-        void load(const hid_t& groupHandle);
-        static std::size_t getLossId() { return lossId_; }
-
-
-        std::vector<double> nodeLossMultiplier_;
-        std::vector<double> labelLossMultiplier_;
-
-
-    private:
-        static const std::size_t lossId_ = 16001;
-
-    };
-
-
-public:
-    GeneralizedHammingLoss(const Parameter& param = Parameter()) : param_(param){}
-
-    template<class GM, class IT1, class IT2>
-            double loss(const GM & gm, IT1 labelBegin, IT1 labelEnd, IT2 GTBegin,IT2 GTEnd) const;
-
-    template<class GM, class IT>
-    void addLoss(GM& gm, IT GTBegin) const;
-
-private:
-    Parameter param_;
-};
-
-inline double GeneralizedHammingLoss::Parameter::getNodeLossMultiplier(const size_t i) const {
-    if(i >= this->nodeLossMultiplier_.size()) {
-        return 1.;
-    }
-    return this->nodeLossMultiplier_[i];
-}
-
-inline double GeneralizedHammingLoss::Parameter::getLabelLossMultiplier(const size_t i) const {
-    if(i >= this->labelLossMultiplier_.size()) {
-        return 1.;
-    }
-    return this->labelLossMultiplier_[i];
-}
-
-inline void GeneralizedHammingLoss::Parameter::save(hid_t& groupHandle) const {
-    std::vector<std::size_t> name;
-    name.push_back(this->getLossId());
-    marray::hdf5::save(groupHandle,"lossId",name);
-
-    if (this->nodeLossMultiplier_.size() > 0) {
-        marray::hdf5::save(groupHandle,"nodeLossMultiplier",this->nodeLossMultiplier_);
-    }
-    if (this->labelLossMultiplier_.size() > 0) {
-        marray::hdf5::save(groupHandle,"labelLossMultiplier",this->labelLossMultiplier_);
-    }
-}
-
-inline void GeneralizedHammingLoss::Parameter::load(const hid_t& groupHandle) {
-    if (H5Dopen(groupHandle, "nodeLossMultiplier", H5P_DEFAULT) >= 0) {
-        marray::hdf5::loadVec(groupHandle, "nodeLossMultiplier", this->nodeLossMultiplier_);
-    } else {
-        std::cout << "nodeLossMultiplier of GeneralizedHammingLoss not found, setting default values" << std::endl;
-    }
-
-    if (H5Dopen(groupHandle, "labelLossMultiplier", H5P_DEFAULT) >= 0) {
-        marray::hdf5::loadVec(groupHandle, "labelLossMultiplier", this->labelLossMultiplier_);
-    } else {
-        std::cout << "labelLossMultiplier of GeneralizedHammingLoss not found, setting default values" << std::endl;
-    }
-}
-
-template<class GM, class IT1, class IT2>
-double GeneralizedHammingLoss::loss(const GM & gm, IT1 labelBegin, const IT1 labelEnd, IT2 GTBegin, const IT2 GTEnd) const
-{
-    double loss = 0.0;
-    size_t nodeIndex = 0;
-
-    for(; labelBegin!= labelEnd; ++labelBegin, ++GTBegin, ++nodeIndex){
-        if(*labelBegin != *GTBegin){            
-            loss += param_.getNodeLossMultiplier(nodeIndex) * param_.getLabelLossMultiplier(*labelBegin);
-        }
-    }
-    return loss;
-}
-
-template<class GM, class IT>
-void GeneralizedHammingLoss::addLoss(GM& gm, IT gt) const
-{
-    //std::cout<<"start to add loss\n";
-    for(typename GM::IndexType i=0; i<gm.numberOfVariables(); ++i){
-        //std::cout<<"   vi"<<i<<"\n";
-        typename GM::LabelType numL = gm.numberOfLabels(i);
-        //std::cout<<"   vi numL"<<numL<<"\n";
-        opengm::ExplicitFunction<typename GM::ValueType,typename GM::IndexType, typename GM::LabelType> f(&numL, &numL+1, 0);
-
-        //std::cout<<"   apply multiplier\n";
-        for(typename GM::LabelType l = 0; l < numL; ++l){
-            f(l) = - param_.getNodeLossMultiplier(i) * param_.getLabelLossMultiplier(l);
-        }
-
-        f(*gt) = 0;
-        //std::cout<<"   increment\n";
-        ++gt;
-        //std::cout<<"   add\n";
-        gm.addFactor(gm.addFunction(f), &i, &i+1);
-        //std::cout<<"   next\n";
-    }
-    //std::cout<<"end add loss\n";
-}
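-
-// Worked example (hypothetical numbers): with nodeLossMultiplier_ = {2, 1}
-// and labelLossMultiplier_ = {1, 3}, node 0 taking wrong label 1 is penalized
-// 2 * 3 = 6, node 1 taking wrong label 0 is penalized 1 * 1 = 1, and any node
-// matching its ground-truth label contributes 0.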
-
-} // namespace learning
-} // namespace opengm
-
-#endif 
diff --git a/include/opengm/learning/loss/hammingloss.hxx b/include/opengm/learning/loss/hammingloss.hxx
deleted file mode 100644
index 47e272e..0000000
--- a/include/opengm/learning/loss/hammingloss.hxx
+++ /dev/null
@@ -1,81 +0,0 @@
-#pragma once
-#ifndef OPENGM_HAMMING_LOSS_HXX
-#define OPENGM_HAMMING_LOSS_HXX
-
-#include "opengm/functions/explicit_function.hxx"
-#include "opengm/graphicalmodel/graphicalmodel_hdf5.hxx"
-
-namespace opengm {
-   namespace learning {
-      class HammingLoss{
-      public:
-          class Parameter{
-            public:
-            bool operator==(const Parameter & other) const{
-                return true;
-            }
-            bool operator<(const Parameter & other) const{
-                return false;
-            }
-            bool operator>(const Parameter & other) const{
-                return false;
-            }
-            /**
-             * serializes the parameter object to the given hdf5 group handle;
-             * the loss id is written to a dataset "lossId"
-             **/
-            void save(hid_t& groupHandle) const;
-            inline void load(const hid_t& ) {}
-            static std::size_t getLossId() { return lossId_; }
-          private:
-            static const std::size_t lossId_ = 16000;
-          };
-
-      public:
-         HammingLoss(const Parameter& param = Parameter()) : param_(param){}
-
-         template<class GM, class IT1, class IT2>
-         double loss(const GM & gm, IT1 labelBegin, IT1 labelEnd, IT2 GTBegin,IT2 GTEnd) const;
-  
-         template<class GM, class IT>
-         void addLoss(GM& gm, IT GTBegin) const;
-      private:
-         Parameter param_;
-      };
-
-      inline void HammingLoss::Parameter::save(hid_t &groupHandle) const {
-          std::vector<std::size_t> name;
-          name.push_back(this->getLossId());
-          marray::hdf5::save(groupHandle,"lossId",name);
-      }
-
-      template<class GM, class IT1, class IT2>
-      double HammingLoss::loss(const GM & gm, IT1 labelBegin, const IT1 labelEnd, IT2 GTBegin, const IT2 GTEnd) const
-      {
-         double loss = 0.0;
-         for(; labelBegin!= labelEnd; ++labelBegin, ++GTBegin){
-            if(*labelBegin != *GTBegin){
-               loss += 1.0;
-            }
-         }
-         return loss;
-      }
-
-      template<class GM, class IT>
-      void HammingLoss::addLoss(GM& gm, IT gt) const
-      {
-
-         for(typename GM::IndexType i=0; i<gm.numberOfVariables(); ++i){
-            typename GM::LabelType numL = gm.numberOfLabels(i);
-            opengm::ExplicitFunction<typename GM::ValueType,typename GM::IndexType, typename GM::LabelType> f(&numL, &numL+1,-1);
-            f(*gt) = 0;
-            ++gt;
-            gm.addFactor(gm.addFunction(f), &i, &(i)+1);
-         }
-      }
-
-   }  
-} // namespace opengm
-
-#endif 
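
The addLoss method above is the margin-rescaling trick: every variable receives a unary that is -1 for each wrong label and 0 for the ground truth, so the augmented energy equals the original energy minus the Hamming loss, and a minimizer of the augmented model performs loss-augmented inference. A sketch of the identity, assuming plain label vectors:

    #include <cstddef>
    #include <vector>

    // Sum of the unaries added by HammingLoss::addLoss for a labeling x:
    // -1 per mislabeled variable, i.e. minus the Hamming loss, so
    //   E_augmented(x) == E_original(x) - hamming(x, gt).
    double lossAugmentationTerm(const std::vector<std::size_t>& labels,
                                const std::vector<std::size_t>& groundTruth)
    {
        double term = 0.0;
        for (std::size_t i = 0; i < labels.size(); ++i)
            if (labels[i] != groundTruth[i])
                term -= 1.0;
        return term;
    }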
diff --git a/include/opengm/learning/loss/noloss.hxx b/include/opengm/learning/loss/noloss.hxx
deleted file mode 100644
index e207433..0000000
--- a/include/opengm/learning/loss/noloss.hxx
+++ /dev/null
@@ -1,73 +0,0 @@
-#pragma once
-#ifndef OPENGM_NO_LOSS_HXX
-#define OPENGM_NO_LOSS_HXX
-
-#include "opengm/functions/explicit_function.hxx"
-#include "opengm/graphicalmodel/graphicalmodel_hdf5.hxx"
-
-namespace opengm {
-namespace learning {
-
-    class NoLoss{
-    public:
-        class Parameter{
-        public:
-            bool operator==(const NoLoss & other) const{
-                return true;
-            }
-            bool operator<(const NoLoss & other) const{
-                return false;
-            }
-            bool operator>(const NoLoss & other) const{
-                return false;
-            }
-            /**
-             * serializes the parameter object to the given hdf5 group handle
-             * by writing the loss id to a dataset named "lossId"
-             **/
-            void save(hid_t& groupHandle) const;
-            inline void load(const hid_t& ) {}
-            static std::size_t getLossId() { return lossId_; }
-        private:
-            static const std::size_t lossId_ = 0;
-        };
-
-    public:
-        NoLoss(const Parameter& param = Parameter()) 
-        : param_(param){
-
-        }
-
-        template<class GM, class IT1, class IT2>
-        double loss(const GM & gm, IT1 labelBegin, IT1 labelEnd, IT2 GTBegin,IT2 GTEnd) const;
-
-        template<class GM, class IT>
-        void addLoss(GM& gm, IT GTBegin) const;
-    private:
-        Parameter param_;
-
-    };
-
-    inline void NoLoss::Parameter::save(hid_t& groupHandle) const {
-        std::vector<std::size_t> name;
-        name.push_back(this->getLossId());
-        marray::hdf5::save(groupHandle,"lossId",name);
-    }
-
-    template<class GM, class IT1, class IT2>
-    double NoLoss::loss(const GM & gm, IT1 labelBegin, const IT1 labelEnd, IT2 GTBegin, const IT2 GTEnd) const
-    {
-        double loss = 0.0;
-        return loss;
-    }
-
-    template<class GM, class IT>
-    void NoLoss::addLoss(GM& gm, IT gt) const
-    {
-    }
-
-}  
-} // namespace opengm
-
-#endif 
diff --git a/include/opengm/learning/maximum-likelihood-learning.hxx b/include/opengm/learning/maximum-likelihood-learning.hxx
deleted file mode 100644
index 3bac158..0000000
--- a/include/opengm/learning/maximum-likelihood-learning.hxx
+++ /dev/null
@@ -1,310 +0,0 @@
-#pragma once
-#ifndef OPENGM_MAXIMUM_LIKELIHOOD_LEARNER_HXX
-#define OPENGM_MAXIMUM_LIKELIHOOD_LEARNER_HXX
-
-#include <vector>
-#include <fstream>
-#include <opengm/inference/messagepassing/messagepassing.hxx>
-//#include <opengm/functions/explicit_function.hxx>
-#include <opengm/functions/view_convert_function.hxx>
-//#include <opengm/functions/learnable/lpotts.hxx>
-//#include <opengm/functions/learnable/lweightedsum_of_functions.hxx>
-#include <opengm/graphicalmodel/graphicalmodel.hxx>
-//#include <opengm/inference/icm.hxx>
-//
-//typedef double ValueType;
-//typedef size_t IndexType;
-//typedef size_t LabelType;
-//typedef opengm::meta::TypeListGenerator<
-//    opengm::ExplicitFunction<ValueType,IndexType,LabelType>,
-//    opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType>,
-//    opengm::functions::learnable::LWeightedSumOfFunctions<ValueType,IndexType,LabelType>
-//>::type FunctionListType;
-//
-//typedef opengm::GraphicalModel<
-//    ValueType,opengm::Adder,
-//    FunctionListType,
-//    opengm::DiscreteSpace<IndexType,LabelType>
-//> GM;
-//
-//typedef opengm::ICM<GM,opengm::Minimizer> INF;
-//typedef opengm::learning::Weights<ValueType> WeightType;
-
-
-
-namespace opengm {
-namespace learning {
-
-template<class IT> 
-class WeightGradientFunctor{
-public:
-   WeightGradientFunctor(size_t weightIndex, IT labelVectorBegin) //std::vector<size_t>::iterator labelVectorBegin)
-        : weightIndex_(weightIndex),
-          labelVectorBegin_(labelVectorBegin){
-    }
-
-    template<class F>
-    void operator()(const F & function ){
-        // signed index so the "not found" sentinel -1 is well defined
-        std::ptrdiff_t index=-1;
-        for(size_t i=0; i<function.numberOfWeights();++i)
-            if(function.weightIndex(i)==weightIndex_)
-                index=i;
-        if(index!=-1)
-            result_ = function.weightGradient(index, labelVectorBegin_);
-        else
-            result_ = 0;
-    }
-
-    size_t weightIndex_;
-    IT  labelVectorBegin_;
-    double result_;
-};
-
-template<class DATASET>
-class MaximumLikelihoodLearner
-{
-public:
-    typedef DATASET DatasetType;
-    typedef typename DATASET::GMType   GMType;
-    typedef typename GMType::ValueType ValueType;
-    typedef typename GMType::IndexType IndexType;
-    typedef typename GMType::LabelType LabelType;
-    typedef typename GMType::FactorType FactorType;
-    typedef opengm::learning::Weights<ValueType> WeightType;  
-
-    typedef typename opengm::ExplicitFunction<ValueType,IndexType,LabelType> FunctionType;
-    typedef typename opengm::ViewConvertFunction<GMType,Minimizer,ValueType> ViewFunctionType;
-    typedef typename GMType::FunctionIdentifier FunctionIdentifierType;
-    typedef typename opengm::meta::TypeListGenerator<FunctionType,ViewFunctionType>::type FunctionListType;
-    typedef opengm::GraphicalModel<ValueType,opengm::Multiplier, FunctionListType, opengm::DiscreteSpace<IndexType,LabelType> > GmBpType;
-    typedef BeliefPropagationUpdateRules<GmBpType, opengm::Integrator> UpdateRules;
-    typedef MessagePassing<GmBpType, opengm::Integrator, UpdateRules, opengm::MaxDistance> BeliefPropagation;
-   
-    class Parameter{
-    public:
-       size_t maxNumSteps_;
-       Parameter() :
-          maxNumSteps_(100)
-          {;}
-    };
-   
-
-    MaximumLikelihoodLearner(DATASET&, const Parameter & w= Parameter() );
-
-   //  template<class INF>
-   void learn();//const typename INF::Parameter&);
-
-    const opengm::learning::Weights<ValueType>& getModelWeights(){return modelWeights_;}
-    Parameter& getLerningWeights(){return param_;}
-
-private:
-    DATASET& dataset_;
-    opengm::learning::Weights<ValueType> modelWeights_;
-    Parameter param_;
-};
-
-template<class DATASET>
-MaximumLikelihoodLearner<DATASET>::MaximumLikelihoodLearner(DATASET& ds, const Parameter& w)
-    : dataset_(ds), param_(w)
-{
-    modelWeights_ = opengm::learning::Weights<ValueType>(ds.getNumberOfWeights());
-}
-
-
-template<class DATASET>
-//template<class INF>
-void MaximumLikelihoodLearner<DATASET>::learn(){//const typename INF::Parameter &infParam){
-
-    opengm::learning::Weights<ValueType> modelWeight( dataset_.getNumberOfWeights() );
-    opengm::learning::Weights<ValueType> bestModelWeight( dataset_.getNumberOfWeights() );
-    //double bestLoss = 100000000.0;
-    std::vector<ValueType> point(dataset_.getNumberOfWeights(),0);
-    std::vector<ValueType> gradient(dataset_.getNumberOfWeights(),0);
-    std::vector<ValueType> Delta(dataset_.getNumberOfWeights(),0);
-    for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p)
-        point[p] = ValueType((0));
-
-
-    typename DATASET::LossType lossFunction;
-    bool search=true;
-    int count=0;
-
-    std::vector< std::vector<ValueType> > w( dataset_.getNumberOfModels(), std::vector<ValueType> ( dataset_.getModel(0).numberOfVariables()) );
-
-    /***********************************************************************************************************/
-    // construct Ground Truth dependent weights
-    /***********************************************************************************************************/
-
-    for(IndexType m=0; m<dataset_.getNumberOfModels(); ++m){ // for each model
-        const GMType &model = dataset_.getModel(m);
-        const std::vector<LabelType>& gt =  dataset_.getGT(m);
-
-        for(IndexType v=0; v<model.numberOfVariables();++v)
-            w[m][v]=(ValueType)gt[v];
-    }
-
-    ValueType eta = 0.1;
-    ValueType delta = 0.25; // 0 <= delta <= 0.5
-    ValueType D_a = 1.0; // distance threshold
-    ValueType optFun, bestOptFun=0.0;
-
-    while(search){
-        ++count;
-        //if (count % 1000 == 0)
-        std::cout << "---count--->" << count << "     ";
-
-        // Get Weights
-        for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
-            modelWeight.setWeight(p, point[p]);
-        }
-
-        // /***********************************************************************************************************/
-        // // calculate current loss - not needed
-        // /***********************************************************************************************************/
-        // opengm::learning::Weights<ValueType>& mp =  dataset_.getWeights();
-        // mp = modelWeight;
-        // std::vector< std::vector<typename INF::LabelType> > confs( dataset_.getNumberOfModels() );
-        // double loss = 0;
-        // for(size_t m=0; m<dataset_.getNumberOfModels(); ++m){
-        //    INF inf( dataset_.getModel(m),infParam);
-        //    inf.infer();
-        //    inf.arg(confs[m]);
-        //    const std::vector<typename INF::LabelType>& gt =  dataset_.getGT(m);
-        //    loss += lossFunction.loss(dataset_.getModel(m), confs[m].begin(), confs[m].end(), gt.begin(), gt.end());
-        // }
-
-        // std::cout << " eta = " << eta << "   weights  ";//<< std::endl;
-        // for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
-        //     std::cout << modelWeight[p] << " " ;
-        // }
-
-        // optFun=0.0;
-
-        /***********************************************************************************************************/
-        // Loopy Belief Propagation setup
-        /***********************************************************************************************************/
-     
-
-        const IndexType maxNumberOfIterations = 40;
-        const double convergenceBound = 1e-7;
-        const double damping = 0.5;
-        typename BeliefPropagation::Parameter weight(maxNumberOfIterations, convergenceBound, damping);
-
-        std::vector< std::vector<ValueType> > b  ( dataset_.getNumberOfModels(), std::vector<ValueType> ( dataset_.getModel(0).numberOfFactors()) );
-
-        for(IndexType m=0; m<dataset_.getNumberOfModels(); ++m){
-
-           //****************************************
-           // Build dummy model
-           //***************************************
-            GmBpType bpModel(dataset_.getModel(m).space());
-
-            for(IndexType f = 0; f<dataset_.getModel(m).numberOfFactors();++f){
-                const typename GMType::FactorType& factor=dataset_.getModel(m)[f];
-                typedef typename opengm::ViewConvertFunction<GMType,Minimizer,ValueType> ViewFunctionType;
-                typedef typename GMType::FunctionIdentifier FunctionIdentifierType;
-                FunctionIdentifierType fid = bpModel.addFunction(ViewFunctionType(factor));
-                bpModel.addFactor(fid, factor.variableIndicesBegin(), factor.variableIndicesEnd());
-            }
-            /***********************************************************************************************************/
-            // run: Loopy Belief Propagation
-            /***********************************************************************************************************/
-            BeliefPropagation bp(bpModel, weight);
-            const std::vector<LabelType>& gt =  dataset_.getGT(m);
-            bp.infer();
-            typename GMType::IndependentFactorType marg;
-
-            for(IndexType f = 0; f<dataset_.getModel(m).numberOfFactors();++f){
-                bp.factorMarginal(f, marg);
-                std::vector<IndexType> indexVector( marg.variableIndicesBegin(), marg.variableIndicesEnd() );
-                std::vector<LabelType> labelVector( marg.numberOfVariables());
-                for(IndexType v=0; v<marg.numberOfVariables();++v)
-                    labelVector[v] = gt[indexVector[v]];
-                b[m][f] = marg(labelVector.begin());
-            }
-        }
-
-        /***********************************************************************************************************/
-        // Calculate Gradient
-        /***********************************************************************************************************/
-        std::vector<ValueType> sum(dataset_.getNumberOfWeights());
-        for(IndexType p=0; p<dataset_.getNumberOfWeights();++p){
-            std::vector< std::vector<ValueType> >
-                piW(dataset_.getNumberOfModels(),
-                    std::vector<ValueType> ( dataset_.getModel(0).numberOfFactors()));
-
-            for(IndexType m=0; m<dataset_.getNumberOfModels(); ++m){
-                const GMType &model = dataset_.getModel(m);
-                const std::vector<LabelType>& gt =  dataset_.getGT(m);
-                ValueType f_p;
-
-                for(IndexType f=0; f<dataset_.getModel(m).numberOfFactors();++f){
-                    const FactorType &factor = dataset_.getModel(m)[f];
-                    std::vector<IndexType> indexVector( factor.variableIndicesBegin(), factor.variableIndicesEnd() );
-                    std::vector<LabelType> labelVector( factor.numberOfVariables());
-                    piW[m][f]=1.0;
-
-                    for(IndexType v=0; v<factor.numberOfVariables();++v){
-                        labelVector[v] = gt[indexVector[v]];
-                        piW[m][f] *=w[m][indexVector[v]];
-                    }
-                    WeightGradientFunctor<typename std::vector<LabelType>::iterator> weightGradientFunctor(p, labelVector.begin());
-                    factor.callFunctor(weightGradientFunctor);
-                    f_p =weightGradientFunctor.result_;
-
-                    // gradient
-                    // ( marginals - ground_truth ) * factor_gradient_p
-                    sum[p] += (b[m][f] - piW[m][f]) * f_p;
-
-                    // likelihood function
-                    // marginals - ground_truth * factor
-                    optFun += b[m][f] - piW[m][f] * factor(labelVector.begin());
-                }
-            }
-        }
-        //std::cout << " loss = " << loss << " optFun = " << optFun << " optFunTmp = " << optFunTmp << std::endl;
-        //std::cout << " loss = " << loss << " optFun = " << optFun << std::endl; 
-        std::cout << " optFun = " << optFun << std::endl;
-
-        if(optFun>=bestOptFun){
-            bestOptFun=optFun;
-            bestModelWeight=modelWeight;
-            //bestLoss=loss;
-        }
-
-        if (count>=param_.maxNumSteps_){
-            search = false;
-        }else{
-            // Calculate the next point
-            ValueType norm2=0.0;
-            for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
-                gradient[p] = sum[p];
-                norm2 += gradient[p]*gradient[p];
-            }
-            norm2 = std::sqrt(norm2);
-            for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
-                gradient[p] /= norm2;
-                std::cout << " gradient [" << p << "] = " << gradient[p] << std::endl;
-                point[p] += eta * gradient[p];
-
-            }
-            eta *= (ValueType)count/(count+1);
-        }
-    } // end while search
-
-    std::cout <<std::endl<< "Best weights: ";
-    for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
-        std::cout << bestModelWeight[p] <<" ";
-    }
-    std::cout << " ==> ";
-    //std::cout << " loss = " << bestLoss << " bestOptFun = " << bestOptFun << " gradient [" << 0 << "] = " << gradient[0] << std::endl;
-    std::cout << " bestOptFun = " << bestOptFun << " gradient [" << 0 << "] = " << gradient[0] << std::endl;
-
-    modelWeights_ = bestModelWeight;
-}
-}
-}
-#endif
-
-
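
The update rule in the while loop above is normalized gradient ascent with a harmonically decaying step size (eta *= count/(count+1) makes eta_t proportional to eta_0/t). A minimal sketch of one step, with plain vectors standing in for the weight containers:

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // One ascent step: move along the unit-norm gradient, then decay eta.
    void gradientAscentStep(std::vector<double>& point,
                            const std::vector<double>& gradient,
                            double& eta, std::size_t count)
    {
        double norm = 0.0;
        for (std::size_t p = 0; p < gradient.size(); ++p)
            norm += gradient[p] * gradient[p];
        norm = std::sqrt(norm);
        for (std::size_t p = 0; p < point.size(); ++p)
            point[p] += eta * gradient[p] / norm;
        eta *= double(count) / double(count + 1);  // eta_t ~ eta_0 / t
    }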
diff --git a/include/opengm/learning/maximum_likelihood_learning.hxx b/include/opengm/learning/maximum_likelihood_learning.hxx
deleted file mode 100644
index d8c54b2..0000000
--- a/include/opengm/learning/maximum_likelihood_learning.hxx
+++ /dev/null
@@ -1,238 +0,0 @@
-#pragma once
-#ifndef OPENGM_MAXIMUM_LIKELIHOOD_LEARNER_HXX
-#define OPENGM_MAXIMUM_LIKELIHOOD_LEARNER_HXX
-
-#include <vector>
-#include <fstream>
-#include <opengm/graphicalmodel/graphicalmodel.hxx>
-#include <opengm/inference/messagepassing/messagepassing.hxx>
-#include <opengm/functions/view_convert_function.hxx>
-#include <iomanip>
-
-namespace opengm {
-   namespace learning {
-
-      template<class DATASET>
-      class MaximumLikelihoodLearner
-      {
-      public:
-         typedef DATASET                     DatasetType;
-         typedef typename DATASET::GMType    GMType;
-         typedef typename GMType::ValueType  ValueType;
-         typedef typename GMType::IndexType  IndexType;
-         typedef typename GMType::LabelType  LabelType;
-         typedef typename GMType::FactorType FactorType;
-         typedef Weights<ValueType>          WeightType;  
-
-         class Parameter{
-         public:
-	     size_t maximumNumberOfIterations_;
-	     double gradientStepSize_;
-	     double weightStoppingCriteria_;
-             double gradientStoppingCriteria_;
-             bool infoFlag_;
-             bool infoEveryStep_; 
-             double weightRegularizer_;
-	     size_t beliefPropagationMaximumNumberOfIterations_;
-	     double beliefPropagationConvergenceBound_;
-	     double beliefPropagationDamping_;
-	     double beliefPropagationTemperature_;
-	     opengm::Tribool beliefPropagationIsAcyclic_;
-	     Parameter():
-	         maximumNumberOfIterations_(100),
-	         gradientStepSize_(0.1),
-		 weightStoppingCriteria_(0.0000000000000001),
-		 gradientStoppingCriteria_(0.0000000000000001),
-		 infoFlag_(true),
-		 infoEveryStep_(false),
-		 weightRegularizer_(1.0),
-		 beliefPropagationMaximumNumberOfIterations_(40),
-		 beliefPropagationConvergenceBound_(0.0000001),
-		 beliefPropagationDamping_(0.5),
-		 beliefPropagationTemperature_(0.3),
-		 beliefPropagationIsAcyclic_(opengm::Tribool::Maybe)
-
-	   {;}
-         };
-
-         class WeightGradientFunctor{
-         public:
-            WeightGradientFunctor(DatasetType& ds) : dataset_(ds) { gradient_.resize(ds.getNumberOfWeights(),0.0);}
-            void setModel(size_t m) { modelID_ = m; } 
-            void setMarg(typename GMType::IndependentFactorType* marg){marg_= marg;}
-            double getGradient(size_t i) {return gradient_[i];}
-            
-            template<class F>
-            void operator()(const F & function ){
-               std::vector<LabelType> labelVector(marg_->numberOfVariables());
-               for(size_t i=0; i<marg_->numberOfVariables(); ++i)
-                  labelVector[i] = dataset_.getGT(modelID_)[marg_->variableIndex(i)]; 
-               for(size_t i=0; i<function.numberOfWeights();++i){
-		  size_t wID = function.weightIndex(i);
-                  gradient_[wID] -= function.weightGradient(i, labelVector.begin());
-               } 
-               
-               opengm::ShapeWalker<typename F::FunctionShapeIteratorType> shapeWalker(function.functionShapeBegin(), function.dimension());
-               for(size_t i=0;i<function.size();++i, ++shapeWalker) {                   
-                  for(size_t i=0; i<function.numberOfWeights();++i){
-                     size_t wID = function.weightIndex(i);
-                     gradient_[wID] += (*marg_)(shapeWalker.coordinateTuple().begin()) * function.weightGradient(i, shapeWalker.coordinateTuple().begin() );
-                  }
-               }              
-            }
-            
-         private:
-            DatasetType&                            dataset_;
-            size_t                                  modelID_;
-            std::vector<double>                     gradient_;  
-            typename GMType::IndependentFactorType* marg_;
-         };
-         
-         MaximumLikelihoodLearner(DATASET&, const Parameter&);
-
-	 void learn();
-         
-         const opengm::learning::Weights<ValueType>& getModelWeights(){return weights_;}
-         WeightType& getLerningWeights(){return weights_;}
-
-      private:
-         DATASET&     dataset_;
-         WeightType   weights_;
-         Parameter    param_;
-      }; 
-
-      template<class DATASET>
-      MaximumLikelihoodLearner<DATASET>::MaximumLikelihoodLearner(DATASET& ds, const Parameter& param )
-         : dataset_(ds), param_(param)
-      {
-          weights_ = opengm::learning::Weights<ValueType>(ds.getNumberOfWeights());
-      }
-
-      template<class DATASET>
-      void MaximumLikelihoodLearner<DATASET>::learn(){
-
-         typedef typename opengm::ExplicitFunction<ValueType,IndexType,LabelType>                                                    FunctionType;
-         typedef typename opengm::ViewConvertFunction<GMType,Minimizer,ValueType>                                                    ViewFunctionType;
-         typedef typename GMType::FunctionIdentifier                                                                                 FunctionIdentifierType;
-         typedef typename opengm::meta::TypeListGenerator<FunctionType,ViewFunctionType>::type                                       FunctionListType;
-         typedef opengm::GraphicalModel<ValueType,opengm::Multiplier, FunctionListType, opengm::DiscreteSpace<IndexType,LabelType> > GmBpType;
-         typedef BeliefPropagationUpdateRules<GmBpType, opengm::Integrator>                                                          UpdateRules;
-         typedef MessagePassing<GmBpType, opengm::Integrator, UpdateRules, opengm::MaxDistance>                                      BeliefPropagation;
-         
-         bool search = true; 
-         double invTemperature = 1.0/param_.beliefPropagationTemperature_;
-
-         if(param_.infoFlag_){
-	     std::cout << "INFO: Maximum Likelihood Learner: Maximum Number Of Iterations "<< param_.maximumNumberOfIterations_ << std::endl;
-	     std::cout << "INFO: Maximum Likelihood Learner: Gradient Step Size "<< param_.gradientStepSize_ << std::endl;
-	     std::cout << "INFO: Maximum Likelihood Learner: Gradient Stopping Criteria "<<param_. gradientStoppingCriteria_ << std::endl;
-	     std::cout << "INFO: Maximum Likelihood Learner: Weight Stopping Criteria "<< param_.weightStoppingCriteria_ << std::endl;
-	     std::cout << "INFO: Maximum Likelihood Learner: Info Flag "<< param_.infoFlag_ << std::endl;
-	     std::cout << "INFO: Maximum Likelihood Learner: Info Every Step "<< param_.infoEveryStep_ << std::endl;
-	     std::cout << "INFO: Maximum Likelihood Learner: Strength of regularizer for the Weight "<< param_.weightRegularizer_ << std::endl;
-	     std::cout << "INFO: Belief Propagation: Maximum Number Of Belief Propagation Iterations "<< param_.beliefPropagationMaximumNumberOfIterations_ << std::endl;
-	     std::cout << "INFO: Belief Propagation: Convergence Bound "<< param_.beliefPropagationConvergenceBound_ << std::endl;
-	     std::cout << "INFO: Belief Propagation: Damping "<< param_.beliefPropagationDamping_ << std::endl;
-	     std::cout << "INFO: Belief Propagation: Temperature "<< param_.beliefPropagationTemperature_ << std::endl;
-	     std::cout << "INFO: Belief Propagation: Acyclic Model "<< param_.beliefPropagationIsAcyclic_ << std::endl;
-	 }
-
-	 typename UpdateRules::SpecialParameterType specialParameter;//=UpdateRules::SpecialParameterType();
-         typename BeliefPropagation::Parameter infParam(
-	     param_.beliefPropagationMaximumNumberOfIterations_, 
-	     param_.beliefPropagationConvergenceBound_, 
-	     param_.beliefPropagationDamping_,
-	     specialParameter,
-	     param_.beliefPropagationIsAcyclic_
-	 );
-
-         size_t iterationCount = 0;
-         while(search){
-            if(iterationCount>=param_.maximumNumberOfIterations_) break;
-            ++iterationCount;
-	    if(param_.infoFlag_)
-	        std::cout << "\r Progress :  " << iterationCount << "/"<<param_.maximumNumberOfIterations_ <<" iteration     0/"<< dataset_.getNumberOfModels() << " models ";
-
-            typename GMType::IndependentFactorType marg;
-            WeightGradientFunctor wgf(dataset_); 
-
-            for(IndexType m=0; m<dataset_.getNumberOfModels(); ++m){ 
-	       if(param_.infoFlag_)
-                  std::cout << "\r Progress :  " << iterationCount << "/"<<param_.maximumNumberOfIterations_ << " iteration     "<<m<<"/"<< dataset_.getNumberOfModels()<<" models ";
-
-               dataset_.lockModel(m);
-               wgf.setModel(m);
-
-               //*********************************
-               //** Build dummy model and infer
-               //*********************************
-               GmBpType bpModel(dataset_.getModel(m).space());
-               for(IndexType f = 0; f<dataset_.getModel(m).numberOfFactors();++f){
-                  const typename GMType::FactorType& factor=dataset_.getModel(m)[f];
-                  typedef typename opengm::ViewConvertFunction<GMType,Minimizer,ValueType> ViewFunctionType;
-                  typedef typename GMType::FunctionIdentifier FunctionIdentifierType;
-                  FunctionIdentifierType fid = bpModel.addFunction(ViewFunctionType(factor,invTemperature));
-                  bpModel.addFactor(fid, factor.variableIndicesBegin(), factor.variableIndicesEnd());
-               } 
-
-               BeliefPropagation bp(bpModel, infParam);
-               bp.infer();
-               for(IndexType f=0; f<dataset_.getModel(m).numberOfFactors();++f){
-                  bp.factorMarginal(f, marg);
-                  
-                  
-                  wgf.setMarg(&marg);
-                  dataset_.getModel(m)[f].callFunctor(wgf);
-               }
-               dataset_.unlockModel(m);
-
-            }
-
-
-            //*****************************
-            //** Gradient Step
-            //************************
-            double gradientNorm = 0;
-            for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
-               gradientNorm += (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p)) * (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p));
-            }
-            gradientNorm = std::sqrt(gradientNorm);
-
-	    if(gradientNorm < param_.gradientStoppingCriteria_)
-	        search = false;
-
-	    if(param_.infoFlag_ and param_.infoEveryStep_)
-	        std::cout << "\r" << std::flush << " Iteration " << iterationCount <<" Gradient = ( ";
-
-	    double normGradientDelta = 0;
-            for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
-	        if(param_.infoFlag_ and param_.infoEveryStep_)
-		    std::cout << std::left << std::setfill(' ') << std::setw(10) << (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p))/gradientNorm << " ";
-
-		double gradientDelta;
-		gradientDelta=param_.gradientStepSize_/iterationCount * (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p))/gradientNorm;
-
-		normGradientDelta +=gradientDelta*gradientDelta;
-                dataset_.getWeights().setWeight(p, weights_.getWeight(p) + gradientDelta);
-                weights_.setWeight(p, weights_.getWeight(p) + gradientDelta); 
-            }
-	    normGradientDelta=std::sqrt(normGradientDelta);
-	    if( normGradientDelta < param_.weightStoppingCriteria_)
-	        search = false;
-
-	    if(param_.infoFlag_ and param_.infoEveryStep_){
-                std::cout << ") ";
-                std::cout << " Weight = ( ";
-                for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p)
-                    std::cout << std::left << std::setfill(' ') << std::setw(10) <<  weights_.getWeight(p) << " ";
-                std::cout << ") "<< "GradientNorm " << std::left << std::setfill(' ') << std::setw(10) << gradientNorm << " GradientDeltaNorm "<< std::setw(10) << normGradientDelta << "             " << std::endl;
-	    }
-	    else if (param_.infoFlag_)
-	      std::cout << "GradientNorm " << std::left << std::setfill(' ') << std::setw(10) << gradientNorm << " GradientDeltaNorm "<< std::setw(10) << normGradientDelta << "             " << std::flush;
-         }
-	 std::cout << "\r" << std::string(400, ' ') << std::flush; // clear the progress line
-         std::cout << "\r Stoped after "<< iterationCount  << "/" << param_.maximumNumberOfIterations_<< " iterations. " <<std::endl;
-      }
-   }
-}
-#endif
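
The gradient step in this second learner differs from the one above in two respects: the raw moment-matching gradient is shifted by the derivative of an L2 regularizer (-2 * weightRegularizer_ * w), and the normalized step is scaled by gradientStepSize_ / iteration. A sketch of one update, returning the delta norm that is compared against weightStoppingCriteria_:

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Regularized, normalized gradient step; returns ||delta|| for the
    // stopping test. lambda plays the role of weightRegularizer_.
    double regularizedStep(std::vector<double>& weights,
                           const std::vector<double>& gradient,
                           double lambda, double stepSize, std::size_t iteration)
    {
        const std::size_t n = weights.size();
        std::vector<double> g(n);
        double gradientNorm = 0.0;
        for (std::size_t p = 0; p < n; ++p) {
            g[p] = gradient[p] - 2.0 * lambda * weights[p];
            gradientNorm += g[p] * g[p];
        }
        gradientNorm = std::sqrt(gradientNorm);
        double deltaNormSq = 0.0;
        for (std::size_t p = 0; p < n; ++p) {
            const double delta = stepSize / double(iteration) * g[p] / gradientNorm;
            weights[p] += delta;
            deltaNormSq += delta * delta;
        }
        return std::sqrt(deltaNormSq);
    }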
diff --git a/include/opengm/learning/rws.hxx b/include/opengm/learning/rws.hxx
deleted file mode 100644
index 42c7cd0..0000000
--- a/include/opengm/learning/rws.hxx
+++ /dev/null
@@ -1,286 +0,0 @@
-#pragma once
-#ifndef OPENGM_SUBGRADIENT_SSVM_LEARNER_HXX
-#define OPENGM_SUBGRADIENT_SSVM_LEARNER_HXX
-
-#include <iomanip>
-#include <vector>
-#include <opengm/inference/inference.hxx>
-#include <opengm/graphicalmodel/weights.hxx>
-#include <opengm/utilities/random.hxx>
-#include <opengm/learning/gradient-accumulator.hxx>
-#include <opengm/learning/weight_averaging.hxx>
-
-#ifdef WITH_OPENMP
-#include <omp.h>
-#endif
-
-#include <boost/circular_buffer.hpp>
-#include <boost/math/distributions/normal.hpp>
-#include <boost/random/normal_distribution.hpp>
-#include <boost/random/mersenne_twister.hpp>
-#include <boost/random/variate_generator.hpp>
-
-
-namespace opengm {
-    namespace learning {
-
-
-
-    template<class T>
-    double gen_normal_3(T &generator)
-    {
-      return generator();
-    }
-
-    // Version that fills a vector
-    template<class T>
-    void gen_normal_3(T &generator,
-                  std::vector<double> &res)
-    {
-      for(size_t i=0; i<res.size(); ++i)
-        res[i]=generator();
-    }
-
-
-           
-    template<class DATASET>
-    class Rws
-    {
-    public: 
-        typedef DATASET DatasetType;
-        typedef typename DATASET::GMType   GMType; 
-        typedef typename DATASET::GMWITHLOSS GMWITHLOSS;
-        typedef typename DATASET::LossType LossType;
-        typedef typename GMType::ValueType ValueType;
-        typedef typename GMType::IndexType IndexType;
-        typedef typename GMType::LabelType LabelType; 
-        typedef opengm::learning::Weights<double> WeightsType;
-        typedef typename std::vector<LabelType>::const_iterator LabelIterator;
-        typedef FeatureAccumulator<GMType, LabelIterator> FeatureAcc;
-
-        typedef std::vector<LabelType> ConfType;
-        typedef boost::circular_buffer<ConfType> ConfBuffer;
-        typedef std::vector<ConfBuffer> ConfBufferVec;
-
-        class Parameter{
-        public:
-
-
-
-            Parameter(){
-                eps_ = 0.00001;
-                maxIterations_ = 10000;
-                stopLoss_ = 0.0;
-                learningRate_ = 1.0;
-                C_ = 1.0;
-                averaging_ = -1;
-                p_ = 10;
-                sigma_ = 1.0;
-            }       
-
-            double eps_;
-            size_t maxIterations_;
-            double stopLoss_;
-            double learningRate_;
-            double C_;
-            int averaging_;
-            size_t p_;
-            double sigma_;
-        };
-
-
-        Rws(DATASET&, const Parameter& );
-
-        template<class INF>
-        void learn(const typename INF::Parameter& para); 
-        //template<class INF, class VISITOR>
-        //void learn(typename INF::Parameter para, VITITOR vis);
-
-        const opengm::learning::Weights<double>& getWeights(){return weights_;}
-        Parameter& getLerningParameters(){return para_;}
-
-
-
-        double getLoss(const GMType & gm ,const GMWITHLOSS  & gmWithLoss, std::vector<LabelType> & labels){
-
-            double loss = 0;
-            std::vector<LabelType> subConf;
-
-            for(size_t fi=gm.numberOfFactors(); fi<gmWithLoss.numberOfFactors(); ++fi){
-                // size the configuration buffer to the factor order instead
-                // of assuming at most 20 variables per loss factor
-                subConf.resize(gmWithLoss[fi].numberOfVariables());
-                for(size_t v=0; v<gmWithLoss[fi].numberOfVariables(); ++v){
-                    subConf[v] = labels[ gmWithLoss[fi].variableIndex(v)];
-                }
-                loss +=  gmWithLoss[fi](subConf.begin());
-            }
-            return loss;
-        }
-
-    private:
-
-        double updateWeights();
-
-        DATASET& dataset_;
-        WeightsType  weights_;
-        Parameter para_;
-        size_t iteration_;
-        FeatureAcc featureAcc_;
-        WeightRegularizer<ValueType> wReg_;
-        WeightAveraging<double> weightAveraging_;
-    }; 
-
-    template<class DATASET>
-    Rws<DATASET>::Rws(DATASET& ds, const Parameter& p )
-    :   dataset_(ds), 
-        para_(p),
-        iteration_(0),
-        featureAcc_(ds.getNumberOfWeights()),
-        wReg_(2, 1.0/p.C_),
-        weightAveraging_(ds.getWeights(),p.averaging_)
-    {
-        featureAcc_.resetWeights();
-        weights_ = opengm::learning::Weights<double>(ds.getNumberOfWeights());
-    }
-
-
-    template<class DATASET>
-    template<class INF>
-    void Rws<DATASET>::learn(const typename INF::Parameter& para){
-
-
-        const size_t nModels = dataset_.getNumberOfModels();
-        const size_t nWegihts = dataset_.getNumberOfWeights();
-
-        
-        //for(size_t wi=0; wi<nWegihts; ++wi){
-        //    dataset_.getWeights().setWeight(wi, 0.0);
-        //}
-
-
-
-        RandomUniform<size_t> randModel(0, nModels);
-        boost::math::normal_distribution<ValueType> nDist(0.0, para_.sigma_);
-        std::vector< std::vector<ValueType> > noiseVecs(para_.p_, std::vector<ValueType>(nWegihts));
-        std::vector<ValueType> lossVec(para_.p_);
-
-        std::vector<ValueType> gradient(nWegihts);
-
-        boost::variate_generator<boost::mt19937, boost::normal_distribution<> >
-        generator(boost::mt19937(time(0)),boost::normal_distribution<>(0.0, para_.sigma_));
-
-        std::cout<<"online mode "<<nWegihts<<"\n";
-
-        std::cout <<"start loss"<< std::setw(6) << std::setfill(' ') << iteration_ << ':'
-                          << std::setw(8) << dataset_. template getTotalLossParallel<INF>(para) <<"  \n\n\n\n";
-
-
-        for(iteration_=0 ; iteration_<para_.maxIterations_; ++iteration_){
-
-
-
-
-            // get random model
-            const size_t gmi = randModel();
-
-            // save the current weights
-            WeightsType currentWeights  = dataset_.getWeights();
-
-
-            featureAcc_.resetWeights();
-
-            // lock the model
-            dataset_.lockModel(gmi);
-
-            for(size_t p=0; p<para_.p_; ++p){
-
-
-                // fill noise 
-                gen_normal_3(generator, noiseVecs[p]);
-
-                // add noise to the weights
-                for(size_t wi=0; wi<nWegihts; ++wi){
-                    const ValueType cw = currentWeights[wi];
-                    const ValueType nw = cw + noiseVecs[p][wi];
-                    dataset_.getWeights().setWeight(wi, nw);
-                }
-
-
-                const GMType & gm = dataset_.getModel(gmi);
-                // do inference
-                std::vector<LabelType> arg;
-                opengm::infer<INF>(gm, para, arg);
-                lossVec[p] = dataset_.getLoss(arg, gmi);
-                
-                //featureAcc_.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), arg.begin());
-                // update weights
-                //const double wChange =updateWeights();      
-            }
-
-            //for(size_t wi=0; wi<nWegihts; ++wi){
-            //    gradient[wi] = featureAcc_.getWeight(wi);
-            //}
-            std::fill(gradient.begin(), gradient.end(),0.0);
-            for(size_t p=0; p<para_.p_; ++p){
-                for(size_t wi=0; wi<nWegihts; ++wi){
-                    gradient[wi] += (1.0/para_.p_)*(noiseVecs[p][wi])*lossVec[p];
-                }
-            }
-
-            const ValueType actualLearningRate = para_.learningRate_/(1.0 + iteration_);
-            //const ValueType actualLearningRate = para_.learningRate_;///(1.0 + iteration_);
-            // do update
-            for(size_t wi=0; wi<nWegihts; ++wi){
-                const ValueType oldWeight = currentWeights[wi];
-                const ValueType newWeights = (oldWeight - actualLearningRate*gradient[wi])*para_.C_;
-                //std::cout<<"wi "<<newWeights<<"\n";
-                dataset_.getWeights().setWeight(wi, newWeights);
-            }
-            std::cout<<"\n";
-            dataset_.unlockModel(gmi);
-
-            if(iteration_%10==0){
-            //if(iteration_%nModels*2 == 0 ){
-                std::cout << '\n'
-                          << std::setw(6) << std::setfill(' ') << iteration_ << ':'
-                          << std::setw(8) << dataset_. template getTotalLossParallel<INF>(para) <<"  "<< std::flush;
-
-            }
-
-        }
-  
-        weights_ = dataset_.getWeights();
-    }
-
-
-    template<class DATASET>
-    double Rws<DATASET>::updateWeights(){
-
-        const size_t nWegihts = dataset_.getNumberOfWeights();
-
-        WeightsType p(nWegihts);
-        WeightsType newWeights(nWegihts);
-
-
-        for(size_t wi=0; wi<nWegihts; ++wi){
-            p[wi] =  dataset_.getWeights().getWeight(wi);
-            p[wi] += para_.C_ * featureAcc_.getWeight(wi);
-        }
-
-
-        double wChange = 0.0;
-        
-        for(size_t wi=0; wi<nWegihts; ++wi){
-            const double wOld = dataset_.getWeights().getWeight(wi);
-            const double wNew = wOld - (para_.learningRate_/double(iteration_+1))*p[wi];
-            newWeights[wi] = wNew;
-        }
-
-        weightAveraging_(newWeights);
-
-
-
-        weights_ = dataset_.getWeights();
-        return wChange;
-    }
-}
-}
-#endif
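
Rws::learn estimates a gradient without any derivative information: it samples p Gaussian perturbations of the current weights, measures the loss at each perturbed point, and averages noise times loss. A self-contained sketch of that estimator; lossAt stands in for the inference-plus-loss evaluation done per model:

    #include <cstddef>
    #include <functional>
    #include <random>
    #include <vector>

    // Zeroth-order gradient estimate:
    //   g = (1/p) * sum_k eps_k * L(w + eps_k),
    // with eps_k ~ N(0, sigma^2) per coordinate.
    std::vector<double> rwsGradient(
        const std::vector<double>& weights, std::size_t p, double sigma,
        const std::function<double(const std::vector<double>&)>& lossAt,
        std::mt19937& rng)
    {
        std::normal_distribution<double> noise(0.0, sigma);
        std::vector<double> gradient(weights.size(), 0.0);
        for (std::size_t k = 0; k < p; ++k) {
            std::vector<double> eps(weights.size());
            std::vector<double> perturbed(weights);
            for (std::size_t wi = 0; wi < weights.size(); ++wi) {
                eps[wi] = noise(rng);
                perturbed[wi] += eps[wi];
            }
            const double loss = lossAt(perturbed);
            for (std::size_t wi = 0; wi < weights.size(); ++wi)
                gradient[wi] += eps[wi] * loss / double(p);
        }
        return gradient;
    }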
diff --git a/include/opengm/learning/solver/BundleCollector.h b/include/opengm/learning/solver/BundleCollector.h
deleted file mode 100644
index a9bf12c..0000000
--- a/include/opengm/learning/solver/BundleCollector.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef INFERENCE_BUNDLE_COLLECTOR_H__
-#define INFERENCE_BUNDLE_COLLECTOR_H__
-
-#include "LinearConstraints.h"
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-class BundleCollector {
-
-public:
-
-	template <typename ModelWeights>
-	void addHyperplane(const ModelWeights& a, double b);
-
-	const LinearConstraints& getConstraints() const { return _constraints; }
-
-private:
-
-	LinearConstraints _constraints;
-};
-
-template <typename ModelWeights>
-void
-BundleCollector::addHyperplane(const ModelWeights& a, double b) {
-	/*
-	  <w,a> + b ≤  ξ
-	        <=>
-	  <w,a> - ξ ≤ -b
-	*/
-
-	unsigned int dims = a.numberOfWeights();
-
-	LinearConstraint constraint;
-
-	for (unsigned int i = 0; i < dims; i++)
-		constraint.setCoefficient(i, a[i]);
-	constraint.setCoefficient(dims, -1.0);
-	constraint.setRelation(LessEqual);
-	constraint.setValue(-b);
-
-	_constraints.add(constraint);
-}
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_BUNDLE_COLLECTOR_H__
-
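
The transformation in addHyperplane is pure rearrangement: the cutting plane <w,a> + b ≤ ξ becomes <w,a> - ξ ≤ -b, with the slack ξ occupying the extra coefficient slot at index dims. A small sketch with a concrete hyperplane, using an illustrative struct in place of LinearConstraint:

    #include <vector>

    struct SketchConstraint {            // illustrative stand-in
        std::vector<double> coefficients;
        double rhs;                      // relation is <=
    };

    // For a = (2, -1) and b = 3 this yields coefficients (2, -1, -1)
    // and rhs -3, i.e. 2*w0 - w1 - xi <= -3.
    SketchConstraint fromHyperplane(const std::vector<double>& a, double b)
    {
        SketchConstraint c;
        c.coefficients = a;
        c.coefficients.push_back(-1.0);  // coefficient of the slack xi
        c.rhs = -b;
        return c;
    }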
diff --git a/include/opengm/learning/solver/CplexBackend.h b/include/opengm/learning/solver/CplexBackend.h
deleted file mode 100644
index feda3f4..0000000
--- a/include/opengm/learning/solver/CplexBackend.h
+++ /dev/null
@@ -1,433 +0,0 @@
-#ifndef OPENGM_LEARNING_SOLVER_CPLEX_SOLVER_H__
-#define OPENGM_LEARNING_SOLVER_CPLEX_SOLVER_H__
-
-#ifdef WITH_CPLEX
-
-#include <string>
-#include <vector>
-
-#include <ilcplex/ilocplex.h>
-
-#include "LinearConstraints.h"
-#include "QuadraticObjective.h"
-#include "QuadraticSolverBackend.h"
-#include "Sense.h"
-#include "Solution.h"
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-/**
- * Cplex interface to solve the following (integer) quadratic program:
- *
- * min  <a,x> + xQx
- * s.t. Ax  == b
- *      Cx  <= d
- *      optionally: x_i \in {0,1} for all i
- *
- * Where (A,b) describes all linear equality constraints, (C,d) all linear
- * inequality constraints and x is the solution vector. a is a real-valued
- * vector denoting the coefficients of the objective and Q a PSD matrix giving
- * the quadratic coefficients of the objective.
- */
-class CplexBackend : public QuadraticSolverBackend {
-
-public:
-
-    struct Parameter {
-
-        Parameter() :
-            mipGap(0.0001),
-            mipFocus(0),
-            numThreads(0),
-            verbose(false) {}
-
-        // The CPLEX relative optimality gap.
-        double mipGap;
-
-        // The CPLEX MIP emphasis: 0 = balanced, 1 = feasible solutions, 2 =
-        // optimal solution, 3 = bound.
-        unsigned int mipFocus;
-
-        // The number of threads to be used by CPLEX. The default (0) uses all
-        // available CPUs.
-        unsigned int numThreads;
-
-        // Show the CPLEX output.
-        bool verbose;
-    };
-
-    CplexBackend(const Parameter& parameter = Parameter());
-
-    virtual ~CplexBackend();
-
-    ///////////////////////////////////
-    // solver backend implementation //
-    ///////////////////////////////////
-
-    void initialize(
-            unsigned int numVariables,
-            VariableType variableType);
-
-    void initialize(
-            unsigned int                                numVariables,
-            VariableType                                defaultVariableType,
-            const std::map<unsigned int, VariableType>& specialVariableTypes);
-
-    void setObjective(const LinearObjective& objective);
-
-    void setObjective(const QuadraticObjective& objective);
-
-    void setConstraints(const LinearConstraints& constraints);
-
-    void addConstraint(const LinearConstraint& constraint);
-
-    bool solve(Solution& solution, double& value, std::string& message);
-
-private:
-
-    //////////////
-    // internal //
-    //////////////
-
-    // set the optimality gap
-    void setMIPGap(double gap);
-
-    // set the MIP focus
-    void setMIPFocus(unsigned int focus);
-
-    // set the number of threads to use
-    void setNumThreads(unsigned int numThreads);
-
-    // create a CPLEX constraint from a linear constraint
-    IloRange createConstraint(const LinearConstraint &constraint);
-
-    /**
-     * Enable solver output.
-     */
-    void setVerbose(bool verbose);
-
-    // size of a and x
-    unsigned int _numVariables;
-
-    // rows in A
-    unsigned int _numEqConstraints;
-
-    // rows in C
-    unsigned int _numIneqConstraints;
-
-    Parameter _parameter;
-
-    // the verbosity of the output
-    int _verbosity;
-
-    // a value by which to scale the objective
-    double _scale;
-
-    // Objective, constraints and cplex environment:
-    IloEnv env_;
-    IloModel model_;
-    IloNumVarArray x_;
-    IloRangeArray c_;
-    IloObjective obj_;
-    IloNumArray sol_;
-    IloCplex cplex_;
-    double constValue_;
-
-    typedef std::vector<IloExtractable> ConstraintVector;
-    ConstraintVector _constraints;
-};
-
-inline CplexBackend::CplexBackend(const Parameter& parameter) :
-    _parameter(parameter),
-    model_(env_),
-    x_(env_),
-    c_(env_),
-    obj_(env_),
-    sol_(env_)
-{
-    std::cout << "constructing cplex solver" << std::endl;
-}
-
-inline CplexBackend::~CplexBackend() {
-    std::cout << "destructing cplex solver..." << std::endl;
-}
-
-inline void
-CplexBackend::initialize(
-        unsigned int numVariables,
-        VariableType variableType) {
-
-    initialize(numVariables, variableType, std::map<unsigned int, VariableType>());
-}
-
-inline void
-CplexBackend::initialize(
-        unsigned int                                numVariables,
-        VariableType                                defaultVariableType,
-        const std::map<unsigned int, VariableType>& specialVariableTypes) {
-
-    _numVariables = numVariables;
-
-    // delete previous variables
-    x_.clear();
-
-    // add new variables to the model
-    if (defaultVariableType == Binary) {
-        std::cout << "creating " << _numVariables << " binary variables" << std::endl;
-        x_.add(IloNumVarArray(env_, _numVariables, 0, 1, ILOBOOL));
-    } else if (defaultVariableType == Continuous) {
-        std::cout << "creating " << _numVariables << " continuous variables" << std::endl;
-        x_.add(IloNumVarArray(env_, _numVariables, -IloInfinity, IloInfinity));
-    } else if (defaultVariableType == Integer) {
-        x_.add(IloNumVarArray(env_, _numVariables, -IloInfinity, IloInfinity, ILOINT));
-    }
-
-    // TODO: port me!
-//    // handle special variable types
-//    typedef std::map<unsigned int, VariableType>::const_iterator VarTypeIt;
-//    for (VarTypeIt i = specialVariableTypes.begin(); i != specialVariableTypes.end(); i++) {
-
-//        unsigned int v = i->first;
-//        VariableType type = i->second;
-
-//        char t = (type == Binary ? 'B' : (type == Integer ? 'I' : 'C'));
-//        _variables[v].set(GRB_CharAttr_VType, t);
-//    }
-
-    std::cout << "creating " << _numVariables << " ceofficients" << std::endl;
-}
-
-inline void
-CplexBackend::setObjective(const LinearObjective& objective) {
-
-    setObjective((QuadraticObjective)objective);
-}
-
-inline void
-CplexBackend::setObjective(const QuadraticObjective& objective) {
-
-    try {
-
-        // set sense of objective
-        if (objective.getSense() == Minimize)
-            obj_ = IloMinimize(env_);
-        else
-            obj_ = IloMaximize(env_);
-
-        // set the constant value of the objective
-        obj_.setConstant(objective.getConstant());
-
-        std::cout << "setting linear coefficients" << std::endl;
-
-        for(size_t i = 0; i < _numVariables; i++)
-        {
-            obj_.setLinearCoef(x_[i], objective.getCoefficients()[i]);
-        }
-
-        // set the quadratic coefficients for all pairs of variables
-        std::cout << "setting quadratic coefficients" << std::endl;
-
-        typedef std::map<std::pair<unsigned int, unsigned int>, double>::const_iterator QuadCoefIt;
-        for (QuadCoefIt i = objective.getQuadraticCoefficients().begin(); i != objective.getQuadraticCoefficients().end(); i++) {
-
-            const std::pair<unsigned int, unsigned int>& variables = i->first;
-            float value = i->second;
-
-            if (value != 0)
-                obj_.setQuadCoef(x_[variables.first], x_[variables.second], value);
-        }
-
-        model_.add(obj_);
-
-    } catch (IloCplex::Exception& e) {
-
-        std::cerr << "CPLEX error: " << e.getMessage() << std::endl;
-    }
-}
-
-inline void
-CplexBackend::setConstraints(const LinearConstraints& constraints) {
-
-    // remove previous constraints
-    for (ConstraintVector::iterator constraint = _constraints.begin(); constraint != _constraints.end(); constraint++)
-        model_.remove(*constraint);
-    _constraints.clear();
-
-    // allocate memory for new constraints
-    _constraints.reserve(constraints.size());
-
-    try {
-        std::cout << "setting " << constraints.size() << " constraints" << std::endl;
-
-        IloExtractableArray cplex_constraints(env_);
-        for (LinearConstraints::const_iterator constraint = constraints.begin(); constraint != constraints.end(); constraint++) {
-            IloRange linearConstraint = createConstraint(*constraint);
-            _constraints.push_back(linearConstraint);
-            cplex_constraints.add(linearConstraint);
-        }
-
-        // add all constraints as batch to the model
-        model_.add(cplex_constraints);
-
-    } catch (IloCplex::Exception& e) {
-
-        std::cerr << "error: " << e.getMessage() << std::endl;
-    }
-}
-
-inline void
-CplexBackend::addConstraint(const LinearConstraint& constraint) {
-
-    try {
-        std::cout << "adding a constraint" << std::endl;
-
-        // add to the model
-        _constraints.push_back(model_.add(createConstraint(constraint)));
-
-    } catch (IloCplex::Exception& e) {
-
-        std::cerr << "error: " << e.getMessage() << std::endl;
-    }
-}
-
-inline IloRange
-CplexBackend::createConstraint(const LinearConstraint& constraint) {
-    // create the lhs expression
-    IloExpr linearExpr(env_);
-
-    // set the coefficients
-    typedef std::map<unsigned int, double>::const_iterator CoefIt;
-    for (CoefIt pair = constraint.getCoefficients().begin(); pair != constraint.getCoefficients().end(); pair++)
-    {
-        linearExpr.setLinearCoef(x_[pair->first], pair->second);
-    }
-
-    switch(constraint.getRelation())
-    {
-        case LessEqual:
-            return IloRange(env_, linearExpr, constraint.getValue());
-        case GreaterEqual:
-            return IloRange(env_, constraint.getValue(), linearExpr);
-        default:
-            // e.g. an equality constraint: bound the expression from both
-            // sides so the switch always returns a value
-            return IloRange(env_, constraint.getValue(), linearExpr, constraint.getValue());
-    }
-}
-
-inline bool
-CplexBackend::solve(Solution& x, double& value, std::string& msg) {
-
-    try {
-        cplex_ = IloCplex(model_);
-        setVerbose(_parameter.verbose);
-
-        setMIPGap(_parameter.mipGap);
-
-        if (_parameter.mipFocus <= 3)
-            setMIPFocus(_parameter.mipFocus);
-        else
-            std::cerr << "Invalid value for MIP focus!" << std::endl;
-
-        setNumThreads(_parameter.numThreads);
-        if(!cplex_.solve()) {
-           std::cout << "failed to optimize. " << cplex_.getStatus() << std::endl;
-           msg = "Optimal solution *NOT* found";
-           return false;
-        }
-        else
-            if(_parameter.verbose == true)
-                msg = "Optimal solution found";
-
-        // extract solution
-        cplex_.getValues(sol_, x_);
-        x.resize(_numVariables);
-        for (unsigned int i = 0; i < _numVariables; i++)
-            x[i] = sol_[i];
-
-        // get current value of the objective
-        value = cplex_.getObjValue();
-
-        x.setValue(value);
-
-    } catch (IloCplex::Exception& e) {
-
-        std::cerr << "error: " << e.getMessage() << std::endl;
-
-        msg = e.getMessage();
-
-        return false;
-    }
-
-    return true;
-}
-
-inline void
-CplexBackend::setMIPGap(double gap) {
-     cplex_.setParam(IloCplex::EpGap, gap);
-}
-
-inline void
-CplexBackend::setMIPFocus(unsigned int focus) {
-    /*
-     * GUROBI and CPLEX have the same meaning for the values of the MIPFocus and MIPEmphasis parameter:
-     *
-     * GUROBI docs:
-     * If you are more interested in finding feasible solutions quickly, you can select MIPFocus=1.
-     * If you believe the solver is having no trouble finding good quality solutions,
-     * and wish to focus more attention on proving optimality, select MIPFocus=2.
-     * If the best objective bound is moving very slowly (or not at all), you may want to try MIPFocus=3
-     * to focus on the bound.
-     *
-     * CPLEX params:
-     * switch(focus) {
-        case MIP_EMPHASIS_BALANCED:
-            cplex_.setParam(IloCplex::MIPEmphasis, 0);
-            break;
-        case  MIP_EMPHASIS_FEASIBILITY:
-            cplex_.setParam(IloCplex::MIPEmphasis, 1);
-            break;
-        case MIP_EMPHASIS_OPTIMALITY:
-            cplex_.setParam(IloCplex::MIPEmphasis, 2);
-            break;
-        case MIP_EMPHASIS_BESTBOUND:
-            cplex_.setParam(IloCplex::MIPEmphasis, 3);
-            break;
-        }
-     */
-
-    cplex_.setParam(IloCplex::MIPEmphasis, focus);
-}
-
-inline void
-CplexBackend::setNumThreads(unsigned int numThreads) {
-    cplex_.setParam(IloCplex::Threads, numThreads);
-}
-
-inline void
-CplexBackend::setVerbose(bool verbose) {
-
-    // configure CPLEX log output
-    if (verbose)
-    {
-        cplex_.setParam(IloCplex::MIPDisplay, 1);
-        cplex_.setParam(IloCplex::SimDisplay, 1);
-        cplex_.setParam(IloCplex::SiftDisplay, 1);
-        cplex_.setParam(IloCplex::BarDisplay, 1);
-        cplex_.setParam(IloCplex::NetDisplay, 1);
-    }
-    else
-    {
-        cplex_.setParam(IloCplex::MIPDisplay, 0);
-        cplex_.setParam(IloCplex::SimDisplay, 0);
-        cplex_.setParam(IloCplex::SiftDisplay, 0);
-        cplex_.setParam(IloCplex::BarDisplay, 0);
-        cplex_.setParam(IloCplex::NetDisplay, 0);
-    }
-}
-
-}}} // namespace opengm::learning::solver
-
-#endif // WITH_CPLEX
-
-#endif // OPENGM_LEARNING_SOLVER_CPLEX_SOLVER_H__
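
For orientation, the calling pattern the two backends share (CplexBackend above, GurobiBackend below) looks roughly like the sketch that follows. It uses only methods declared in these headers; how the objective and constraints are populated is not part of this diff, so they are taken as given, and Solution is assumed to be default-constructible (solve() resizes it):

    #ifdef WITH_CPLEX
    #include <iostream>
    #include <string>

    using namespace opengm::learning::solver;

    // Hedged usage sketch: initialize, hand over the problem, solve.
    void solveSketch(const QuadraticObjective& objective,
                     const LinearConstraints& constraints,
                     unsigned int numVariables)
    {
        CplexBackend::Parameter param;
        param.verbose = true;
        CplexBackend backend(param);

        backend.initialize(numVariables, Continuous);
        backend.setObjective(objective);
        backend.setConstraints(constraints);

        Solution solution;  // assumed default-constructible
        double value = 0.0;
        std::string message;
        if (backend.solve(solution, value, message))
            std::cout << message << ": objective = " << value << std::endl;
        else
            std::cerr << message << std::endl;
    }
    #endif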
diff --git a/include/opengm/learning/solver/GurobiBackend.h b/include/opengm/learning/solver/GurobiBackend.h
deleted file mode 100644
index 2638063..0000000
--- a/include/opengm/learning/solver/GurobiBackend.h
+++ /dev/null
@@ -1,439 +0,0 @@
-#ifndef OPENGM_LEARNING_SOLVER_GUROBI_SOLVER_H__
-#define OPENGM_LEARNING_SOLVER_GUROBI_SOLVER_H__
-
-#ifdef WITH_GUROBI
-
-#include <string>
-#include <vector>
-
-#include <gurobi_c++.h>
-
-#include "LinearConstraints.h"
-#include "QuadraticObjective.h"
-#include "QuadraticSolverBackend.h"
-#include "Sense.h"
-#include "Solution.h"
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-/**
- * Gurobi interface to solve the following (integer) quadratic program:
- *
- * min  <a,x> + xQx
- * s.t. Ax  == b
- *      Cx  <= d
- *      optionally: x_i \in {0,1} for all i
- *
- * Where (A,b) describes all linear equality constraints, (C,d) all linear
- * inequality constraints and x is the solution vector. a is a real-valued
- * vector denoting the coefficients of the objective and Q a PSD matrix giving
- * the quadratic coefficients of the objective.
- */
-class GurobiBackend : public QuadraticSolverBackend {
-
-public:
-
-	struct Parameter {
-
-		Parameter() :
-			mipGap(0.0001),
-			mipFocus(0),
-			numThreads(0),
-			verbose(false) {}
-
-		// The Gurobi relative optimality gap.
-		double mipGap;
-
-		// The Gurobi MIP focus: 0 = balanced, 1 = feasible solutions, 2 = 
-		// optimal solution, 3 = bound.
-		unsigned int mipFocus;
-
-		// The number of threads to be used by Gurobi. The default (0) uses all 
-		// available CPUs.
-		unsigned int numThreads;
-
-		// Show the gurobi output.
-		bool verbose;
-	};
-
-	GurobiBackend(const Parameter& parameter = Parameter());
-
-	virtual ~GurobiBackend();
-
-	///////////////////////////////////
-	// solver backend implementation //
-	///////////////////////////////////
-
-	void initialize(
-			unsigned int numVariables,
-			VariableType variableType);
-
-	void initialize(
-			unsigned int                                numVariables,
-			VariableType                                defaultVariableType,
-			const std::map<unsigned int, VariableType>& specialVariableTypes);
-
-	void setObjective(const LinearObjective& objective);
-
-	void setObjective(const QuadraticObjective& objective);
-
-	void setConstraints(const LinearConstraints& constraints);
-
-	void addConstraint(const LinearConstraint& constraint);
-
-	bool solve(Solution& solution, double& value, std::string& message);
-
-
-private:
-
-	//////////////
-	// internal //
-	//////////////
-
-	// dump the current problem to a file
-	void dumpProblem(std::string filename);
-
-	// set the optimality gap
-	void setMIPGap(double gap);
-
-	// set the MIP focus
-	void setMIPFocus(unsigned int focus);
-
-	// set the number of threads to use
-	void setNumThreads(unsigned int numThreads);
-
-    // create a gurobi constraint from a linear constraint
-    GRBConstr createConstraint(const LinearConstraint &constraint);
-
-	/**
-	 * Enable solver output.
-	 */
-	void setVerbose(bool verbose);
-
-	// size of a and x
-	unsigned int _numVariables;
-
-	// rows in A
-	unsigned int _numEqConstraints;
-
-	// rows in C
-	unsigned int _numIneqConstraints;
-
-	Parameter _parameter;
-
-	// the GRB environment
-	GRBEnv _env;
-
-	// the (binary) variables x
-	GRBVar* _variables;
-
-	// the objective
-	GRBQuadExpr _objective;
-
-	std::vector<GRBConstr> _constraints;
-
-	// the GRB model containing the objective and constraints
-	GRBModel _model;
-
-	// the verbosity of the output
-	int _verbosity;
-
-	// a value by which to scale the objective
-	double _scale;
-};
-
-inline GurobiBackend::GurobiBackend(const Parameter& parameter) :
-	_parameter(parameter),
-	_variables(0),
-	_model(_env) {
-}
-
-inline GurobiBackend::~GurobiBackend() {
-
-	std::cout << "destructing gurobi solver..." << std::endl;
-
-	if (_variables)
-		delete[] _variables;
-}
-
-inline void
-GurobiBackend::initialize(
-		unsigned int numVariables,
-		VariableType variableType) {
-
-	initialize(numVariables, variableType, std::map<unsigned int, VariableType>());
-}
-
-inline void
-GurobiBackend::initialize(
-		unsigned int                                numVariables,
-		VariableType                                defaultVariableType,
-		const std::map<unsigned int, VariableType>& specialVariableTypes) {
-
-	if (_parameter.verbose)
-		setVerbose(true);
-	else
-		setVerbose(false);
-
-	setMIPGap(_parameter.mipGap);
-
-	if (_parameter.mipFocus <= 3)
-		setMIPFocus(_parameter.mipFocus);
-	else
-		std::cerr << "Invalid value for MIP focus!" << std::endl;
-
-	setNumThreads(_parameter.numThreads);
-
-	_numVariables = numVariables;
-
-	// delete previous variables
-	if (_variables)
-		delete[] _variables;
-
-	// add new variables to the model
-	if (defaultVariableType == Binary) {
-
-		std::cout << "creating " << _numVariables << " binary variables" << std::endl;
-
-		_variables = _model.addVars(_numVariables, GRB_BINARY);
-
-		_model.update();
-
-	} else if (defaultVariableType == Continuous) {
-
-		std::cout << "creating " << _numVariables << " continuous variables" << std::endl;
-
-		_variables = _model.addVars(_numVariables, GRB_CONTINUOUS);
-
-		_model.update();
-
-		// remove default lower bound on variables
-		for (unsigned int i = 0; i < _numVariables; i++)
-			_variables[i].set(GRB_DoubleAttr_LB, -GRB_INFINITY);
-
-	} else if (defaultVariableType == Integer) {
-
-		std::cout << "creating " << _numVariables << " integer variables" << std::endl;
-
-		_variables = _model.addVars(_numVariables, GRB_INTEGER);
-
-		_model.update();
-
-		// remove default lower bound on variables
-		for (unsigned int i = 0; i < _numVariables; i++)
-			_variables[i].set(GRB_DoubleAttr_LB, -GRB_INFINITY);
-	}
-
-	// handle special variable types
-	typedef std::map<unsigned int, VariableType>::const_iterator VarTypeIt;
-	for (VarTypeIt i = specialVariableTypes.begin(); i != specialVariableTypes.end(); i++) {
-
-		unsigned int v = i->first;
-		VariableType type = i->second;
-
-		char t = (type == Binary ? 'B' : (type == Integer ? 'I' : 'C'));
-		_variables[v].set(GRB_CharAttr_VType, t);
-	}
-
-	std::cout << "creating " << _numVariables << " coefficients" << std::endl;
-}
-
-inline void
-GurobiBackend::setObjective(const LinearObjective& objective) {
-
-	setObjective((QuadraticObjective)objective);
-}
-
-inline void
-GurobiBackend::setObjective(const QuadraticObjective& objective) {
-
-	try {
-
-		// set sense of objective
-		if (objective.getSense() == Minimize)
-			_model.set(GRB_IntAttr_ModelSense, 1);
-		else
-			_model.set(GRB_IntAttr_ModelSense, -1);
-
-		// set the constant value of the objective
-		_objective = objective.getConstant();
-
-		std::cout << "setting linear coefficients" << std::endl;
-
-		_objective.addTerms(&objective.getCoefficients()[0], _variables, _numVariables);
-
-		// set the quadratic coefficients for all pairs of variables
-		std::cout << "setting quadratic coefficients" << std::endl;
-
-		typedef std::map<std::pair<unsigned int, unsigned int>, double>::const_iterator QuadCoefIt;
-		for (QuadCoefIt i = objective.getQuadraticCoefficients().begin(); i != objective.getQuadraticCoefficients().end(); i++) {
-
-			const std::pair<unsigned int, unsigned int>& variables = i->first;
-			double value = i->second;
-
-			if (value != 0)
-				_objective += _variables[variables.first]*_variables[variables.second]*value;
-		}
-
-		_model.setObjective(_objective);
-
-		_model.update();
-
-	} catch (const GRBException& e) {
-
-		std::cerr << "error: " << e.getMessage() << std::endl;
-	}
-}
-
-inline void
-GurobiBackend::setConstraints(const LinearConstraints& constraints) {
-
-	// remove previous constraints
-	for (std::vector<GRBConstr>::iterator constraint = _constraints.begin(); constraint != _constraints.end(); constraint++)
-		_model.remove(*constraint);
-	_constraints.clear();
-
-	_model.update();
-
-	// allocate memory for new constraints
-	_constraints.reserve(constraints.size());
-
-	try {
-
-		std::cout << "setting " << constraints.size() << " constraints" << std::endl;
-
-		for (LinearConstraints::const_iterator constraint = constraints.begin(); constraint != constraints.end(); constraint++) {
-            _constraints.push_back(createConstraint(*constraint));
-		}
-
-		_model.update();
-
-	} catch (const GRBException& e) {
-
-		std::cerr << "error: " << e.getMessage() << std::endl;
-	}
-}
-
-inline void
-GurobiBackend::addConstraint(const LinearConstraint& constraint) {
-
-    try {
-
-        std::cout << "adding a constraint" << std::endl;
-
-        _constraints.push_back(createConstraint(constraint));
-        _model.update();
-
-    } catch (const GRBException& e) {
-        std::cerr << "error: " << e.getMessage() << std::endl;
-    }
-}
-
-inline GRBConstr
-GurobiBackend::createConstraint(const LinearConstraint& constraint)
-{
-    // create the lhs expression
-    GRBLinExpr lhsExpr;
-
-    // set the coefficients
-    typedef std::map<unsigned int, double>::const_iterator CoefIt;
-    for (CoefIt pair = constraint.getCoefficients().begin(); pair != constraint.getCoefficients().end(); pair++)
-        lhsExpr += pair->second * _variables[pair->first];
-
-    // construct constraint
-    return _model.addConstr(
-                lhsExpr,
-                (constraint.getRelation() == LessEqual ? GRB_LESS_EQUAL :
-                                                          (constraint.getRelation() == GreaterEqual ? GRB_GREATER_EQUAL :
-                                                                                                       GRB_EQUAL)),
-                constraint.getValue());
-}
-
-inline bool
-GurobiBackend::solve(Solution& x, double& value, std::string& msg) {
-
-	try {
-
-		_model.optimize();
-
-		int status = _model.get(GRB_IntAttr_Status);
-
-		if (status != GRB_OPTIMAL) {
-			msg = "Optimal solution *NOT* found";
-			return false;
-		} else
-			msg = "Optimal solution found";
-
-		// extract solution
-
-		x.resize(_numVariables);
-		for (unsigned int i = 0; i < _numVariables; i++)
-			x[i] = _variables[i].get(GRB_DoubleAttr_X);
-
-		// get current value of the objective
-		value = _model.get(GRB_DoubleAttr_ObjVal);
-
-		x.setValue(value);
-
-	} catch (const GRBException& e) {
-
-		std::cerr << "error: " << e.getMessage() << std::endl;
-
-		msg = e.getMessage();
-
-		return false;
-	}
-
-	return true;
-}
-
-inline void
-GurobiBackend::setMIPGap(double gap) {
-
-	_model.getEnv().set(GRB_DoubleParam_MIPGap, gap);
-}
-
-inline void
-GurobiBackend::setMIPFocus(unsigned int focus) {
-
-	_model.getEnv().set(GRB_IntParam_MIPFocus, focus);
-}
-
-inline void
-GurobiBackend::setNumThreads(unsigned int numThreads) {
-
-	_model.getEnv().set(GRB_IntParam_Threads, numThreads);
-}
-
-inline void
-GurobiBackend::setVerbose(bool verbose) {
-
-	// setup GRB environment
-	if (verbose)
-		_model.getEnv().set(GRB_IntParam_OutputFlag, 1);
-	else
-		_model.getEnv().set(GRB_IntParam_OutputFlag, 0);
-}
-
-inline void
-GurobiBackend::dumpProblem(std::string filename) {
-
-	try {
-
-		_model.write(filename);
-
-	} catch (const GRBException& e) {
-
-		std::cerr << "error: " << e.getMessage() << std::endl;
-	}
-}
-
-}}} // namespace opengm::learning::solver
-
-#endif // WITH_GUROBI
-
-#endif // GUROBI_OPENGM_LEARNING_SOLVER_SOLVER_H__
-
-
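For reference, a minimal usage sketch of the backend deleted above, matching the QP formulation in its class comment. This is not part of the original sources; the problem data is illustrative only, and a Gurobi build (WITH_GUROBI) is assumed:

    #include <iostream>
    #include <string>

    #include "opengm/learning/solver/GurobiBackend.h"

    using namespace opengm::learning::solver;

    int main() {
        // minimize x0^2 + x1^2 - x0  subject to  x0 + x1 == 1
        GurobiBackend backend;
        backend.initialize(2, Continuous);

        QuadraticObjective objective(2);
        objective.setSense(Minimize);
        objective.setCoefficient(0, -1.0);            // linear part <a,x>
        objective.setQuadraticCoefficient(0, 0, 1.0); // diagonal entries of Q in xQx
        objective.setQuadraticCoefficient(1, 1, 1.0);
        backend.setObjective(objective);

        LinearConstraint sum;                         // x0 + x1 == 1
        sum.setCoefficient(0, 1.0);
        sum.setCoefficient(1, 1.0);
        sum.setRelation(Equal);
        sum.setValue(1.0);
        backend.addConstraint(sum);

        Solution x;
        double value;
        std::string message;
        if (backend.solve(x, value, message))
            std::cout << "x = (" << x[0] << ", " << x[1] << "), value = " << value << std::endl;
        return 0;
    }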
diff --git a/include/opengm/learning/solver/LinearConstraint.h b/include/opengm/learning/solver/LinearConstraint.h
deleted file mode 100644
index bec224c..0000000
--- a/include/opengm/learning/solver/LinearConstraint.h
+++ /dev/null
@@ -1,94 +0,0 @@
-#ifndef INFERENCE_LINEAR_CONSTRAINT_H__
-#define INFERENCE_LINEAR_CONSTRAINT_H__
-
-#include <map>
-
-#include "Relation.h"
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-/**
- * A sparse linear constraint.
- */
-class LinearConstraint {
-
-public:
-
-	LinearConstraint();
-
-	void setCoefficient(unsigned int varNum, double coef);
-
-	void setRelation(Relation relation);
-
-	void setValue(double value);
-
-	const std::map<unsigned int, double>& getCoefficients() const;
-
-	const Relation& getRelation() const;
-
-	double getValue() const;
-
-private:
-
-	std::map<unsigned int, double> _coefs;
-
-	Relation _relation;
-
-	double _value;
-};
-
-inline
-LinearConstraint::LinearConstraint() :
-	_relation(LessEqual) {}
-
-inline void
-LinearConstraint::setCoefficient(unsigned int varNum, double coef) {
-
-	if (coef == 0) {
-
-		std::map<unsigned int, double>::iterator i = _coefs.find(varNum);
-		if (i != _coefs.end())
-			_coefs.erase(i);
-
-	} else {
-
-		_coefs[varNum] = coef;
-	}
-}
-
-inline void
-LinearConstraint::setRelation(Relation relation) {
-
-	_relation = relation;
-}
-
-inline void
-LinearConstraint::setValue(double value) {
-
-	_value = value;
-}
-
-inline const std::map<unsigned int, double>&
-LinearConstraint::getCoefficients() const {
-
-	return _coefs;
-}
-
-inline const Relation&
-LinearConstraint::getRelation() const {
-
-	return _relation;
-}
-
-inline double
-LinearConstraint::getValue() const {
-
-	return _value;
-}
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_LINEAR_CONSTRAINT_H__
-
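As a quick illustration (not from the original tree), the class above encodes one sparse row of A or C; here the hypothetical constraint 2*x_0 + 3*x_7 <= 5, assuming LinearConstraint.h is included:

    opengm::learning::solver::LinearConstraint c;
    c.setCoefficient(0, 2.0);   // 2 * x_0
    c.setCoefficient(7, 3.0);   // 3 * x_7 -- untouched variables stay out of the map
    c.setRelation(opengm::learning::solver::LessEqual);
    c.setValue(5.0);            // right-hand side: ... <= 5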
diff --git a/include/opengm/learning/solver/LinearConstraints.h b/include/opengm/learning/solver/LinearConstraints.h
deleted file mode 100644
index ef2d4f3..0000000
--- a/include/opengm/learning/solver/LinearConstraints.h
+++ /dev/null
@@ -1,119 +0,0 @@
-#ifndef INFERENCE_LINEAR_CONSTRAINTS_H__
-#define INFERENCE_LINEAR_CONSTRAINTS_H__
-
-#include "LinearConstraint.h"
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-class LinearConstraints {
-
-	typedef std::vector<LinearConstraint> linear_constraints_type;
-
-public:
-
-	typedef linear_constraints_type::iterator       iterator;
-
-	typedef linear_constraints_type::const_iterator const_iterator;
-
-	/**
-	 * Create a new set of linear constraints and allocate enough memory to hold
-	 * 'size' linear constraints. More or less constraints can be added, but
-	 * memory might be wasted (if more is allocated than necessary) or unnecessary
-	 * reallocations might occur (if more added than allocated).
-	 *
-	 * @param size The number of linear constraints to reserve memory for.
-	 */
-	LinearConstraints(size_t size = 0);
-
-	/**
-	 * Remove all constraints from this set of linear constraints.
-	 */
-	void clear() { _linearConstraints.clear(); }
-
-	/**
-	 * Add a linear constraint.
-	 *
-	 * @param linearConstraint The linear constraint to add.
-	 */
-	void add(const LinearConstraint& linearConstraint);
-
-	/**
-	 * Add a set of linear constraints.
-	 *
-	 * @param linearConstraints The set of linear constraints to add.
-	 */
-	void addAll(const LinearConstraints& linearConstraints);
-
-	/**
-	 * @return The number of linear constraints in this set.
-	 */
-	unsigned int size() const { return _linearConstraints.size(); }
-
-	const const_iterator begin() const { return _linearConstraints.begin(); }
-
-	iterator begin() { return _linearConstraints.begin(); }
-
-	const const_iterator end() const { return _linearConstraints.end(); }
-
-	iterator end() { return _linearConstraints.end(); }
-
-	const LinearConstraint& operator[](size_t i) const { return _linearConstraints[i]; }
-
-	LinearConstraint& operator[](size_t i) { return _linearConstraints[i]; }
-
-	/**
-	 * Get a list of indices of linear constraints that use the given variables.
-	 */
-	std::vector<unsigned int> getConstraints(const std::vector<unsigned int>& variableIds);
-
-private:
-
-	linear_constraints_type _linearConstraints;
-};
-
-inline
-LinearConstraints::LinearConstraints(size_t size) {
-
-	_linearConstraints.reserve(size);
-}
-
-inline void
-LinearConstraints::add(const LinearConstraint& linearConstraint) {
-
-	_linearConstraints.push_back(linearConstraint);
-}
-
-inline void
-LinearConstraints::addAll(const LinearConstraints& linearConstraints) {
-
-	_linearConstraints.insert(_linearConstraints.end(), linearConstraints.begin(), linearConstraints.end());
-}
-
-inline std::vector<unsigned int>
-LinearConstraints::getConstraints(const std::vector<unsigned int>& variableIds) {
-
-	std::vector<unsigned int> indices;
-
-	for (unsigned int i = 0; i < size(); i++) {
-
-		LinearConstraint& constraint = _linearConstraints[i];
-
-		for (std::vector<unsigned int>::const_iterator v = variableIds.begin(); v != variableIds.end(); v++) {
-
-			if (constraint.getCoefficients().count(*v) != 0) {
-
-				indices.push_back(i);
-				break;
-			}
-		}
-	}
-
-	return indices;
-}
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_LINEAR_CONSTRAINTS_H__
-
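A small companion sketch (again illustrative, not original code) for the container above; `c` is the constraint from the previous snippet:

    using namespace opengm::learning::solver;

    LinearConstraints set;
    set.add(c);

    // indices of all constraints that mention variable 7
    std::vector<unsigned int> vars(1, 7);
    std::vector<unsigned int> touching = set.getConstraints(vars);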
diff --git a/include/opengm/learning/solver/LinearObjective.h b/include/opengm/learning/solver/LinearObjective.h
deleted file mode 100644
index a8f1b9e..0000000
--- a/include/opengm/learning/solver/LinearObjective.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef INFERENCE_LINEAR_OBJECTIVE_H__
-#define INFERENCE_LINEAR_OBJECTIVE_H__
-
-#include "QuadraticObjective.h"
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-class LinearObjective : public QuadraticObjective {
-
-public:
-
-	LinearObjective(unsigned int size = 0) : QuadraticObjective(size) {}
-
-private:
-
-	using QuadraticObjective::setQuadraticCoefficient;
-};
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_OBJECTIVE_H__
-
diff --git a/include/opengm/learning/solver/LinearSolverBackend.h b/include/opengm/learning/solver/LinearSolverBackend.h
deleted file mode 100644
index 6ba5b2c..0000000
--- a/include/opengm/learning/solver/LinearSolverBackend.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef INFERENCE_LINEAR_SOLVER_BACKEND_H__
-#define INFERENCE_LINEAR_SOLVER_BACKEND_H__
-
-#include "LinearObjective.h"
-#include "LinearConstraints.h"
-#include "Solution.h"
-#include "VariableType.h"
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-class LinearSolverBackend {
-
-public:
-
-	virtual ~LinearSolverBackend() {};
-
-	/**
-	 * Initialise the linear solver for the given type of variables.
-	 *
-	 * @param numVariables The number of variables in the problem.
-	 * @param variableType The type of the variables (Continuous, Integer,
-	 *                     Binary).
-	 */
-	virtual void initialize(
-			unsigned int numVariables,
-			VariableType variableType) = 0;
-
-	/**
-	 * Initialise the linear solver for the given type of variables.
-	 *
-	 * @param numVariables
-	 *             The number of variables in the problem.
-	 * 
-	 * @param defaultVariableType
-	 *             The default type of the variables (Continuous, Integer, 
-	 *             Binary).
-	 *
-	 * @param specialVariableTypes
-	 *             A map of variable numbers to variable types to override the 
-	 *             default.
-	 */
-	virtual void initialize(
-			unsigned int                                numVariables,
-			VariableType                                defaultVariableType,
-			const std::map<unsigned int, VariableType>& specialVariableTypes) = 0;
-
-	/**
-	 * Set the objective.
-	 *
-	 * @param objective A linear objective.
-	 */
-	virtual void setObjective(const LinearObjective& objective) = 0;
-
-	/**
-	 * Set the linear (in)equality constraints.
-	 *
-	 * @param constraints A set of linear constraints.
-	 */
-	virtual void setConstraints(const LinearConstraints& constraints) = 0;
-
-	/**
-	 * Add a single linear constraint.
-	 *
-	 * @param constraint The constraint to add.
-	 */
-	virtual void addConstraint(const LinearConstraint& constraint) = 0;
-
-	/**
-	 * Solve the problem.
-	 *
-	 * @param solution A solution object to write the solution to.
-	 * @param value The optimal value of the objective.
-	 * @param message A status message from the solver.
-	 * @return true, if the optimal value was found.
-	 */
-	virtual bool solve(Solution& solution, double& value, std::string& message) = 0;
-};
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_LINEAR_SOLVER_BACKEND_H__
-
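To illustrate the second initialize() overload above, a hypothetical call that keeps one binary variable in an otherwise continuous problem; `backend` stands for a pointer to any concrete LinearSolverBackend:

    std::map<unsigned int, opengm::learning::solver::VariableType> special;
    special[3] = opengm::learning::solver::Binary;  // variable 3 is binary
    backend->initialize(10, opengm::learning::solver::Continuous, special);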
diff --git a/include/opengm/learning/solver/QuadraticObjective.h b/include/opengm/learning/solver/QuadraticObjective.h
deleted file mode 100644
index f0ffcc7..0000000
--- a/include/opengm/learning/solver/QuadraticObjective.h
+++ /dev/null
@@ -1,181 +0,0 @@
-#ifndef INFERENCE_QUADRATIC_OBJECTIVE_H__
-#define INFERENCE_QUADRATIC_OBJECTIVE_H__
-
-#include <map>
-#include <vector>
-
-#include "Sense.h"
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-class QuadraticObjective {
-
-public:
-
-	/**
-	 * Create a new quadratic objective for 'size' variables.
-	 *
-	 * @param size The number of coefficients in the objective.
-	 */
-	QuadraticObjective(unsigned int size = 0);
-
-	/**
-	 * Set the constant value of the expression.
-	 *
-	 * @param constant The value of the constant part of the objective.
-	 */
-	void setConstant(double constant);
-
-	/**
-	 * @return The value of the constant part of the objective.
-	 */
-	double getConstant() const;
-
-	/**
-	 * Add a coefficient.
-	 *
-	 * @param varNum The number of the variable to add the coefficient for.
-	 * @param coef The value of the coefficient.
-	 */
-	void setCoefficient(unsigned int varNum, double coef);
-
-	/**
-	 * Get the linear coefficients of this objective as a dense vector,
-	 * indexed by variable number.
-	 *
-	 * @return A vector of coefficient values, one per variable.
-	 */
-	const std::vector<double>& getCoefficients() const;
-
-	/**
-	 * Add a quadratic coefficient. Use this to fill the Q matrix in the
-	 * objective <a,x> + xQx.
-	 *
-	 * @param varNum1 The row of Q.
-	 * @param varNum2 The column of Q.
-	 * @param coef The value of the coefficient.
-	 */
-	void setQuadraticCoefficient(unsigned int varNum1, unsigned int varNum2, double coef);
-
-	/**
-	 * Get the quadratic coefficients of this objective as a map of pairs of variable
-	 * numbers to coefficient values.
-	 *
-	 * @return A map from pairs of variable numbers to coefficient values.
-	 */
-	const std::map<std::pair<unsigned int, unsigned int>, double>& getQuadraticCoefficients() const;
-
-	/**
-	 * Set the sense of the objective.
-	 *
-	 * @param sense Minimize or Maximize.
-	 */
-	void setSense(Sense sense);
-
-	/**
-	 * Get the sense of this objective.
-	 *
-	 * @return Minimize or Maximize.
-	 */
-	Sense getSense() const;
-
-	/**
-	 * Resize the objective. New coefficients will be set to zero.
-	 *
-	 * @param size The new size of the objective.
-	 */
-	void resize(unsigned int size);
-
-	/**
-	 * Get the number of variables in this objective.
-	 *
-	 * @return The number of variables in this objective.
-	 */
-	unsigned int size() const { return _coefs.size(); }
-
-private:
-
-	Sense _sense;
-
-	double _constant;
-
-	// linear coefficients are assumed to be dense, therefore we use a vector
-	std::vector<double> _coefs;
-
-	std::map<std::pair<unsigned int, unsigned int>, double> _quadraticCoefs;
-};
-
-inline
-QuadraticObjective::QuadraticObjective(unsigned int size) :
-	_sense(Minimize),
-	_constant(0) {
-
-	resize(size);
-}
-
-inline void
-QuadraticObjective::setConstant(double constant) {
-
-	_constant = constant;
-}
-
-inline double
-QuadraticObjective::getConstant() const {
-
-	return _constant;
-}
-
-inline void
-QuadraticObjective::setCoefficient(unsigned int varNum, double coef) {
-
-	_coefs[varNum] = coef;
-}
-
-inline const std::vector<double>&
-QuadraticObjective::getCoefficients() const {
-
-	return _coefs;
-}
-
-inline void
-QuadraticObjective::setQuadraticCoefficient(unsigned int varNum1, unsigned int varNum2, double coef) {
-
-	if (coef == 0) {
-
-		_quadraticCoefs.erase(std::make_pair(varNum1, varNum2));
-
-	} else {
-
-		_quadraticCoefs[std::make_pair(varNum1, varNum2)] = coef;
-	}
-}
-
-inline const std::map<std::pair<unsigned int, unsigned int>, double>&
-QuadraticObjective::getQuadraticCoefficients() const {
-
-	return _quadraticCoefs;
-}
-
-inline void
-QuadraticObjective::setSense(Sense sense) {
-
-	_sense = sense;
-}
-
-inline Sense
-QuadraticObjective::getSense() const {
-
-	return _sense;
-}
-
-inline void
-QuadraticObjective::resize(unsigned int size) {
-
-	_coefs.resize(size, 0.0);
-}
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_QUADRATIC_OBJECTIVE_H__
-
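A short sketch (illustrative only) of filling the objective above, encoding f(x) = 3 + x_0 - 2*x_1 + x_0*x_1 to be minimized:

    using namespace opengm::learning::solver;

    QuadraticObjective f(2);
    f.setConstant(3.0);
    f.setCoefficient(0,  1.0);            //  x_0
    f.setCoefficient(1, -2.0);            // -2 x_1
    f.setQuadraticCoefficient(0, 1, 1.0); // entry Q(0,1): the x_0*x_1 term
    f.setSense(Minimize);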
diff --git a/include/opengm/learning/solver/QuadraticSolverBackend.h b/include/opengm/learning/solver/QuadraticSolverBackend.h
deleted file mode 100644
index cc3a160..0000000
--- a/include/opengm/learning/solver/QuadraticSolverBackend.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef INFERENCE_QUADRATIC_SOLVER_BACKEND_H__
-#define INFERENCE_QUADRATIC_SOLVER_BACKEND_H__
-
-#include "QuadraticObjective.h"
-#include "LinearSolverBackend.h"
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-class QuadraticSolverBackend : public LinearSolverBackend {
-
-public:
-
-	virtual ~QuadraticSolverBackend() {};
-
-	/**
-	 * Set the objective.
-	 *
-	 * @param objective A quadratic objective.
-	 */
-	virtual void setObjective(const QuadraticObjective& objective) = 0;
-};
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_QUADRATIC_SOLVER_BACKEND_H__
-
diff --git a/include/opengm/learning/solver/QuadraticSolverFactory.h b/include/opengm/learning/solver/QuadraticSolverFactory.h
deleted file mode 100644
index e986630..0000000
--- a/include/opengm/learning/solver/QuadraticSolverFactory.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef OPENGM_LEARNING_SOLVER_QUADRATIC_SOLVER_FACTORY_H__
-#define OPENGM_LEARNING_SOLVER_QUADRATIC_SOLVER_FACTORY_H__
-
-#include <opengm/opengm.hxx>
-
-#ifdef WITH_GUROBI
-#include "GurobiBackend.h"
-#elif defined(WITH_CPLEX)
-#include "CplexBackend.h"
-#endif
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-class QuadraticSolverFactory {
-
-public:
-
-	static QuadraticSolverBackend* Create() {
-
-#ifdef WITH_GUROBI
-	return new GurobiBackend();
-#elif defined(WITH_CPLEX)
-	return new CplexBackend();
-#endif
-
-	throw opengm::RuntimeError("No quadratic solver available.");
-	}
-};
-
-}}} // namespace opengm::learning::solver
-
-#endif // OPENGM_LEARNING_SOLVER_QUADRATIC_SOLVER_FACTORY_H__
-
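For completeness, a hedged sketch of driving the factory above; `f` and `set` stand for a QuadraticObjective and a LinearConstraints collection built as in the earlier snippets:

    using namespace opengm::learning::solver;

    QuadraticSolverBackend* solver = QuadraticSolverFactory::Create();
    solver->initialize(2, Binary);
    solver->setObjective(f);
    solver->setConstraints(set);

    Solution s;
    double value;
    std::string msg;
    solver->solve(s, value, msg);
    delete solver;   // the caller owns the returned backend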
diff --git a/include/opengm/learning/solver/QuadraticSolverParameters.h b/include/opengm/learning/solver/QuadraticSolverParameters.h
deleted file mode 100644
index 42486e8..0000000
--- a/include/opengm/learning/solver/QuadraticSolverParameters.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef INFERENCE_QUADRATIC_SOLVER_PARAMETERS_H__
-#define INFERENCE_QUADRATIC_SOLVER_PARAMETERS_H__
-
-#include "LinearSolverParameters.h"
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-class QuadraticSolverParameters : public LinearSolverParameters {};
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_QUADRATIC_SOLVER_PARAMETERS_H__
-
diff --git a/include/opengm/learning/solver/Relation.h b/include/opengm/learning/solver/Relation.h
deleted file mode 100644
index 7364591..0000000
--- a/include/opengm/learning/solver/Relation.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef INFERENCE_RELATION_H__
-#define INFERENCE_RELATION_H__
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-/** Used to indicate the relation of a linear constraint.
- */
-enum Relation {
-
-	LessEqual,
-	Equal,
-	GreaterEqual
-};
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_RELATION_H__
-
diff --git a/include/opengm/learning/solver/Sense.h b/include/opengm/learning/solver/Sense.h
deleted file mode 100644
index 3f50c3a..0000000
--- a/include/opengm/learning/solver/Sense.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef SENSE_H__
-#define SENSE_H__
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-/** Used to indicate whether an objective is supposed to be minimized or
- * maximized.
- */
-enum Sense {
-
-	Minimize,
-	Maximize
-};
-
-}}} // namespace opengm::learning::solver
-
-#endif // SENSE_H__
-
diff --git a/include/opengm/learning/solver/Solution.h b/include/opengm/learning/solver/Solution.h
deleted file mode 100644
index 8016bda..0000000
--- a/include/opengm/learning/solver/Solution.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef INFERENCE_SOLUTION_H__
-#define INFERENCE_SOLUTION_H__
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-class Solution {
-
-public:
-
-	Solution(unsigned int size = 0);
-
-	void resize(unsigned int size);
-
-	unsigned int size() const { return _solution.size(); }
-
-	const double& operator[](unsigned int i) const { return _solution[i]; }
-
-	double& operator[](unsigned int i) { return _solution[i]; }
-
-	std::vector<double>& getVector() { return _solution; }
-
-	void setValue(double value) { _value = value; }
-
-	double getValue() { return _value; }
-
-private:
-
-	std::vector<double> _solution;
-
-	double _value;
-};
-
-inline Solution::Solution(unsigned int size) {
-
-	resize(size);
-}
-
-inline void
-Solution::resize(unsigned int size) {
-
-	_solution.resize(size);
-}
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_SOLUTION_H__
-
diff --git a/include/opengm/learning/solver/VariableType.h b/include/opengm/learning/solver/VariableType.h
deleted file mode 100644
index d107a41..0000000
--- a/include/opengm/learning/solver/VariableType.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef INFERENCE_VARIABLE_TYPE_H__
-#define INFERENCE_VARIABLE_TYPE_H__
-
-namespace opengm {
-namespace learning {
-namespace solver {
-
-enum VariableType {
-
-	Continuous,
-	Integer,
-	Binary
-};
-
-}}} // namespace opengm::learning::solver
-
-#endif // INFERENCE_VARIABLE_TYPE_H__
-
diff --git a/include/opengm/learning/struct-max-margin.hxx b/include/opengm/learning/struct-max-margin.hxx
deleted file mode 100644
index 8af78ed..0000000
--- a/include/opengm/learning/struct-max-margin.hxx
+++ /dev/null
@@ -1,219 +0,0 @@
-#pragma once
-#ifndef OPENGM_LEARNING_STRUCT_MAX_MARGIN_HXX
-#define OPENGM_LEARNING_STRUCT_MAX_MARGIN_HXX
-
-#include "bundle-optimizer.hxx"
-#include "gradient-accumulator.hxx"
-
-#ifdef WITH_OPENMP
-#include <omp.h>
-#endif
-
-namespace opengm {
-
-namespace learning {
-
-template <
-		typename DS,
-		typename O = BundleOptimizer<typename DS::ValueType> >
-class StructMaxMargin {
-
-public:
-
-	typedef DS DatasetType;
-	typedef O  OptimizerType;
-
-    typedef typename DatasetType::GMType GMType;
-    typedef typename DatasetType::GMWITHLOSS GMWITHLOSS;
-	typedef typename DatasetType::ValueType       ValueType;
-    typedef typename DatasetType::Weights         Weights;
-
-	struct Parameter {
-        typedef typename OptimizerType::Parameter OptimizerParameter;
-        OptimizerParameter optimizerParameter_;
-	};
-
-	StructMaxMargin(DatasetType& dataset, const Parameter& parameter = Parameter()) :
-		_dataset(dataset),
-        _parameter(parameter),
-        _optimizer(parameter.optimizerParameter_)
-    {}
-
-	Parameter& parameter() { return _parameter; }
-
-    template <typename InferenceType>
-    void learn(const typename InferenceType::Parameter& parameter);
-
-    const Weights& getWeights() { return _weights; }
-
-private:
-
-	template <typename InferenceType>
-	class Oracle {
-
-		public:
-
-            Oracle(DatasetType& dataset, const typename InferenceType::Parameter& infParam) :
-                _dataset(dataset),
-                _infParam(infParam)
-            {}
-
-			/**
-			 * Evaluate the loss-augmented energy value of the dataset and its 
-			 * gradient at w.
-			 */
-            void operator()(const Weights& w, double& value, Weights& gradient) {
-
-				typedef std::vector<typename InferenceType::LabelType> ConfigurationType;
-
-				// initialize gradient and value with zero
-				for (int i = 0; i < gradient.numberOfWeights(); i++)
-					gradient[i] = 0;
-				value = 0;
-
-				// For each model E(y,w), we have to compute the value and 
-				// gradient of
-				//
-				//   max_y E(y',w) - E(y,w) + Δ(y',y)            (1)
-				//   =
-				//   max_y L(y,w)
-				//
-				// where y' is the best-effort solution (also known as 
-				// groundtruth) and w are the current weights. The loss 
-				// augmented model given by the dataset is
-				//
-				//   F(y,w) = E(y,w) - Δ(y',y).
-				//
-				// Let c = E(y',w) be the constant contribution of the 
-				// best-effort solution. (1) is equal to
-				//
-				//  -min_y -c + F(y,w).
-				//
-				// The gradient of the maximand in (1) at y* is
-				//
-				//   ∂L(y,w)/∂w = ∂E(y',w)/∂w -
-				//                ∂E(y,w)/∂w
-				//
-				//              = Σ_θ ∂θ(y'_θ,w)/∂w -
-				//                Σ_θ ∂θ(y_θ,w)/∂w,
-				//
-				// which is a positive gradient contribution for the 
-				// best-effort, and a negative contribution for the maximizer 
-				// y*.
-
-				// set the weights w in E(x,y) and F(x,y)
-				_dataset.getWeights() = w;
-
-                //if(_infParam.verbose_ )
-                //    std::cout << std::endl << " MODEL : ";
-
-                #ifdef WITH_OPENMP
-                omp_lock_t modelLock;
-                omp_init_lock(&modelLock);
-                #pragma omp parallel for
-                #endif
-                for (int i = 0; i < _dataset.getNumberOfModels(); i++) {
-                    // if(_infParam.verbose_ )
-                    //     std::cout << i;
-
-                    // lock the model
-                    #ifdef WITH_OPENMP
-                    omp_set_lock(&modelLock);
-                    _dataset.lockModel(i);
-                    omp_unset_lock(&modelLock);
-                    #else
-                    _dataset.lockModel(i);
-                    #endif
-                    // get E(x,y) and F(x,y)
-                    const GMType &     gm  = _dataset.getModel(i);
-					const GMWITHLOSS & gml = _dataset.getModelWithLoss(i);
-
-					// get the best-effort solution y'
-					const ConfigurationType& bestEffort = _dataset.getGT(i);
-
-					// compute constant c for current w
-					ValueType c = gm.evaluate(bestEffort);
-
-					// find the minimizer y* of F(y,w)
-					ConfigurationType mostViolated;
-                    InferenceType inference(gml, _infParam);
-
-                    inference.infer();
-                    inference.arg(mostViolated);
-
-					// the optimal value of (1) is now c - F(y*,w)
-                    #pragma omp atomic
-                    value += c - gml.evaluate(mostViolated);
-
-					// the gradients are
-					typedef GradientAccumulator<Weights, ConfigurationType> GA;
-                    GA gaBestEffort(gradient, bestEffort, GA::Add);
-                    GA gaMostViolated(gradient, mostViolated, GA::Subtract);
-                    for (size_t j = 0; j < gm.numberOfFactors(); j++) {
-
-						gm[j].callViFunctor(gaBestEffort);
-						gm[j].callViFunctor(gaMostViolated);
-					}
-
-                    // unlock the model
-                    #ifdef WITH_OPENMP
-                    omp_set_lock(&modelLock);
-                    _dataset.unlockModel(i);
-                    omp_unset_lock(&modelLock);
-                    #else
-                    _dataset.unlockModel(i);
-                    #endif
-                } // end for model
-			}
-
-            const typename InferenceType::Parameter& getInfParam(){
-                return _infParam;
-            }
-
-		private:
-
-			DatasetType& _dataset;
-            const typename InferenceType::Parameter& _infParam;
-	};
-
-	DatasetType& _dataset;
-
-	Parameter _parameter;
-
-	OptimizerType _optimizer;
-
-    Weights _weights;
-};
-
-template <typename DS, typename O>
-template <typename InferenceType>
-void
-StructMaxMargin<DS, O>::learn(const typename InferenceType::Parameter& infParams) {
-
-    typedef typename InferenceType:: template RebindGm<GMWITHLOSS>::type InfType;
-
-    typedef typename InfType::Parameter InfTypeParam;
-    InfTypeParam infTypeParam(infParams);
-    Oracle<InfType> oracle(_dataset, infTypeParam);
-
-	_weights = _dataset.getWeights();
-
-	// minimize structured loss
-    OptimizerResult result = _optimizer.optimize(oracle, _weights);
-
-	if (result == Error)
-		throw opengm::RuntimeError("optimizer did not succeed");
-
-	if (result == ReachedMinGap)
-		std::cout << "optimization converged to requested precision" << std::endl;
-
-	if (result == ReachedSteps)
-        std::cout << "optimization stopped after " << parameter().optimizerParameter_.steps << " iterations" << std::endl;
-}
-
-} // namespace learning
-
-} // namespace opengm
-
-#endif // OPENGM_LEARNING_STRUCT_MAX_MARGIN_HXX
-
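A minimal sketch of how this learner was meant to be driven, assuming a dataset type with the interface used above (GMType, GMWITHLOSS, Weights, lockModel, ...) and any OpenGM inference type; DatasetType, InfType, dataset and infParameter are placeholders:

    typedef opengm::learning::StructMaxMargin<DatasetType> Learner;

    Learner::Parameter param;
    param.optimizerParameter_.steps = 100;  // the bundle optimizer's step limit, as printed above

    Learner learner(dataset, param);
    learner.learn<InfType>(infParameter);   // inner loop: loss-augmented inference per model
    const DatasetType::Weights& w = learner.getWeights();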
diff --git a/include/opengm/learning/structured_perceptron.hxx b/include/opengm/learning/structured_perceptron.hxx
deleted file mode 100644
index f66cd07..0000000
--- a/include/opengm/learning/structured_perceptron.hxx
+++ /dev/null
@@ -1,208 +0,0 @@
-#pragma once
-#ifndef OPENGM_STRUCT_PERCEPTRON_LEARNER_HXX
-#define OPENGM_STRUCT_PERCEPTRON_LEARNER_HXX
-
-#include <cmath>
-#include <iostream>
-#include <vector>
-#include <opengm/inference/inference.hxx>
-#include <opengm/graphicalmodel/weights.hxx>
-#include <opengm/utilities/random.hxx>
-#include <opengm/learning/gradient-accumulator.hxx>
-
-
-namespace opengm {
-    namespace learning {
-
-
-
-           
-    template<class DATASET>
-    class StructuredPerceptron
-    {
-    public: 
-        typedef DATASET DatasetType;
-        typedef typename DATASET::GMType   GMType; 
-        typedef typename DATASET::LossType LossType;
-        typedef typename GMType::ValueType ValueType;
-        typedef typename GMType::IndexType IndexType;
-        typedef typename GMType::LabelType LabelType; 
-
-        typedef typename std::vector<LabelType>::const_iterator LabelIterator;
-        typedef FeatureAccumulator<GMType, LabelIterator> FeatureAcc;
-
-        class Parameter{
-        public:
-
-            enum LearningMode{
-                Online = 0,
-                Batch = 2
-            };
-
-
-            Parameter(){
-                eps_ = 0.00001;
-                maxIterations_ = 10000;
-                stopLoss_ = 0.0;
-                decayExponent_ = 0.0;
-                decayT0_ = 0.0;
-                learningMode_ = Online;
-            }       
-
-            double eps_;
-            size_t maxIterations_;
-            double stopLoss_;
-            double decayExponent_;
-            double decayT0_;
-            LearningMode learningMode_;
-        };
-
-
-        StructuredPerceptron(DATASET&, const Parameter& );
-
-        template<class INF>
-        void learn(const typename INF::Parameter& para); 
-        //template<class INF, class VISITOR>
-        //void learn(typename INF::Parameter para, VITITOR vis);
-
-        const opengm::learning::Weights<double>& getWeights(){return weights_;}
-        Parameter& getLerningParameters(){return para_;}
-
-
-        double getLearningRate( )const{
-            if(para_.decayExponent_<=0.000000001 && para_.decayExponent_>=-0.000000001 ){
-                return 1.0;
-            }
-            else{
-                return std::pow(para_.decayT0_ + static_cast<double>(iteration_+1),para_.decayExponent_);
-            }
-        }
-
-    private:
-
-        double updateWeights();
-
-        DATASET& dataset_;
-        opengm::learning::Weights<double> weights_;
-        Parameter para_;
-        size_t iteration_;
-        FeatureAcc featureAcc_;
-    }; 
-
-    template<class DATASET>
-    StructuredPerceptron<DATASET>::StructuredPerceptron(DATASET& ds, const Parameter& p )
-    : dataset_(ds), para_(p),iteration_(0),featureAcc_(ds.getNumberOfWeights(),false)
-    {
-        featureAcc_.resetWeights();
-        weights_ = opengm::learning::Weights<double>(ds.getNumberOfWeights());
-  
-    }
-
-
-    template<class DATASET>
-    template<class INF>
-    void StructuredPerceptron<DATASET>::learn(const typename INF::Parameter& para){
-
-
-        const size_t nModels = dataset_.getNumberOfModels();
-        const size_t nWegihts = dataset_.getNumberOfWeights();
-
-        
-
-
-
-
-        if(para_.learningMode_ == Parameter::Online){
-            RandomUniform<size_t> randModel(0, nModels);
-            std::cout<<"online mode\n";
-            for(iteration_=0 ; iteration_<para_.maxIterations_; ++iteration_){
-
-                if(iteration_%nModels==0){
-                    std::cout<<"loss :"<<dataset_. template getTotalLoss<INF>(para)<<"\n";
-                }
-
-
-                // get random model
-                const size_t gmi = randModel();
-                // lock the model
-                dataset_.lockModel(gmi);
-                const GMType & gm = dataset_.getModel(gmi);
-
-                // do inference
-                std::vector<LabelType> arg;
-                opengm::infer<INF>(gm, para, arg);
-                featureAcc_.resetWeights();
-                featureAcc_.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), arg.begin());
-                dataset_.unlockModel(gmi);
-
-                // update weights
-                const double wChange =updateWeights();
-
-            }
-        }
-        else if(para_.learningMode_ == Parameter::Batch){
-            std::cout<<"batch mode\n";
-            for(iteration_=0 ; iteration_<para_.maxIterations_; ++iteration_){
-                // print the loss once per iteration
-                std::cout<<"loss :"<<dataset_. template getTotalLoss<INF>(para)<<"\n";
-
-                // reset the weights
-                featureAcc_.resetWeights();
-
-
-
-
-                //#pragma omp parallel for
-                for(size_t gmi=0; gmi<nModels; ++gmi)
-                {
-                    
-                    // lock the model
-                    //omp_set_lock(&modelLockUnlock);
-                    dataset_.lockModel(gmi);     
-                    //omp_unset_lock(&modelLockUnlock);
-                        
-                    
-
-                    const GMType & gm = dataset_.getModel(gmi);
-                    //run inference
-                    std::vector<LabelType> arg;
-                    opengm::infer<INF>(gm, para, arg);
-
-
-                    // 
-                    FeatureAcc featureAcc(nWegihts,false);
-                    featureAcc.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), arg.begin());
-
-
-                    featureAcc_.accumulateFromOther(featureAcc);
-                    dataset_.unlockModel(gmi);    
-
-
-                }
-
-                // update the weights
-                const double wChange =updateWeights();
-
-            }
-        }
-
-        weights_ = dataset_.getWeights();
-    }
-
-
-    template<class DATASET>
-    double StructuredPerceptron<DATASET>::updateWeights(){
-        double wChange = 0.0;
-        const size_t nWegihts = dataset_.getNumberOfWeights();
-        for(size_t wi=0; wi<nWegihts; ++wi){
-            const double wOld = dataset_.getWeights().getWeight(wi);
-            const double wNew = wOld + getLearningRate()*featureAcc_.getWeight(wi);
-            wChange += std::pow(wOld-wNew,2);
-            dataset_.getWeights().setWeight(wi, wNew);
-        }
-        weights_ = dataset_.getWeights();
-        return wChange;
-    }
-}
-}
-#endif
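As with the other learners, a brief usage sketch under the same assumptions (DatasetType, dataset, InfType and infParameter are placeholders):

    typedef opengm::learning::StructuredPerceptron<DatasetType> Learner;

    Learner::Parameter param;
    param.maxIterations_ = 1000;
    param.decayExponent_ = -0.5;                       // learning rate decays like 1/sqrt(t)
    param.learningMode_  = Learner::Parameter::Online;

    Learner learner(dataset, param);
    learner.learn<InfType>(infParameter);
    const opengm::learning::Weights<double>& w = learner.getWeights();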
diff --git a/include/opengm/learning/subgradient_ssvm.hxx b/include/opengm/learning/subgradient_ssvm.hxx
deleted file mode 100644
index 67514f9..0000000
--- a/include/opengm/learning/subgradient_ssvm.hxx
+++ /dev/null
@@ -1,353 +0,0 @@
-#pragma once
-#ifndef OPENGM_SUBGRADIENT_SSVM_LEARNER_HXX
-#define OPENGM_SUBGRADIENT_SSVM_LEARNER_HXX
-
-#include <cmath>
-#include <iostream>
-#include <iomanip>
-#include <vector>
-#include <opengm/inference/inference.hxx>
-#include <opengm/graphicalmodel/weights.hxx>
-#include <opengm/utilities/random.hxx>
-#include <opengm/learning/gradient-accumulator.hxx>
-#include <opengm/learning/weight_averaging.hxx>
-
-#ifdef WITH_OPENMP
-#include <omp.h>
-#endif
-
-#include <boost/circular_buffer.hpp>
-
-
-
-namespace opengm {
-    namespace learning {
-
-
-
-           
-    template<class DATASET>
-    class SubgradientSSVM
-    {
-    public: 
-        typedef DATASET DatasetType;
-        typedef typename DATASET::GMType   GMType; 
-        typedef typename DATASET::GMWITHLOSS GMWITHLOSS;
-        typedef typename DATASET::LossType LossType;
-        typedef typename GMType::ValueType ValueType;
-        typedef typename GMType::IndexType IndexType;
-        typedef typename GMType::LabelType LabelType; 
-        typedef opengm::learning::Weights<double> WeightsType;
-        typedef typename std::vector<LabelType>::const_iterator LabelIterator;
-        typedef FeatureAccumulator<GMType, LabelIterator> FeatureAcc;
-
-        typedef std::vector<LabelType> ConfType;
-        typedef boost::circular_buffer<ConfType> ConfBuffer;
-        typedef std::vector<ConfBuffer> ConfBufferVec;
-
-        class Parameter{
-        public:
-
-            enum LearningMode{
-                Online = 0,
-                Batch = 1
-            };
-
-
-            Parameter(){
-                eps_ = 0.00001;
-                maxIterations_ = 10000;
-                stopLoss_ = 0.0;
-                learningRate_ = 1.0;
-                C_ = 1.0;
-                learningMode_ = Batch;
-                averaging_ = -1;
-                nConf_ = 0;
-            }       
-
-            double eps_;
-            size_t maxIterations_;
-            double stopLoss_;
-            double learningRate_;
-            double C_;
-            LearningMode learningMode_;
-            int averaging_;
-            int nConf_;
-        };
-
-
-        SubgradientSSVM(DATASET&, const Parameter& );
-
-        template<class INF>
-        void learn(const typename INF::Parameter& para); 
-        //template<class INF, class VISITOR>
-        //void learn(typename INF::Parameter para, VISITOR vis);
-
-        const opengm::learning::Weights<double>& getWeights(){return weights_;}
-        Parameter& getLerningParameters(){return para_;}
-
-
-        double getLearningRate( )const{
-            // note: this class' Parameter has no decay members; return the
-            // 1/(t+1) decayed rate that updateWeights() actually applies
-            return para_.learningRate_/double(iteration_+1);
-        }
-
-        double getLoss(const GMType & gm ,const GMWITHLOSS  & gmWithLoss, std::vector<LabelType> & labels){
-
-            double loss = 0 ;
-            std::vector<LabelType> subConf(20,0);
-
-            for(size_t fi=gm.numberOfFactors(); fi<gmWithLoss.numberOfFactors(); ++fi){
-                for(size_t v=0; v<gmWithLoss[fi].numberOfVariables(); ++v){
-                    subConf[v] = labels[ gmWithLoss[fi].variableIndex(v)];
-                }
-                loss +=  gmWithLoss[fi](subConf.begin());
-            }
-            return loss;
-        }
-
-    private:
-
-        double updateWeights();
-
-        DATASET& dataset_;
-        WeightsType  weights_;
-        Parameter para_;
-        size_t iteration_;
-        FeatureAcc featureAcc_;
-        WeightRegularizer<ValueType> wReg_;
-        WeightAveraging<double> weightAveraging_;
-    }; 
-
-    template<class DATASET>
-    SubgradientSSVM<DATASET>::SubgradientSSVM(DATASET& ds, const Parameter& p )
-    :   dataset_(ds), 
-        para_(p),
-        iteration_(0),
-        featureAcc_(ds.getNumberOfWeights()),
-        wReg_(2, 1.0/p.C_),
-        weightAveraging_(ds.getWeights(),p.averaging_)
-    {
-        featureAcc_.resetWeights();
-        weights_ = opengm::learning::Weights<double>(ds.getNumberOfWeights());
-    }
-
-
-    template<class DATASET>
-    template<class INF>
-    void SubgradientSSVM<DATASET>::learn(const typename INF::Parameter& para){
-
-
-        typedef typename INF:: template RebindGm<GMWITHLOSS>::type InfLossGm;
-        typedef typename InfLossGm::Parameter InfLossGmParam;
-        InfLossGmParam infLossGmParam(para);
-
-
-        const size_t nModels = dataset_.getNumberOfModels();
-        const size_t nWegihts = dataset_.getNumberOfWeights();
-
-        
-        for(size_t wi=0; wi<nWegihts; ++wi){
-            dataset_.getWeights().setWeight(wi, 0.0);
-        }
-        std::cout<<"PARAM nConf_"<<para_.nConf_<<"\n";
-        const bool useWorkingSets = para_.nConf_>0;
-
-        ConfBufferVec buffer(useWorkingSets? nModels : 0, ConfBuffer(para_.nConf_));
-
-        std::vector<bool> isViolated(para_.nConf_);
-
-        if(para_.learningMode_ == Parameter::Online){
-            RandomUniform<size_t> randModel(0, nModels);
-            //std::cout<<"online mode\n";
-            for(iteration_=0 ; iteration_<para_.maxIterations_; ++iteration_){
-
-
-
-
-                // get random model
-                const size_t gmi = randModel();
-                // lock the model
-                dataset_.lockModel(gmi);
-                const GMWITHLOSS & gmWithLoss = dataset_.getModelWithLoss(gmi);
-
-                // do inference
-                std::vector<LabelType> arg;
-                opengm::infer<InfLossGm>(gmWithLoss, infLossGmParam, arg);
-                featureAcc_.resetWeights();
-                featureAcc_.accumulateModelFeatures(dataset_.getModel(gmi), dataset_.getGT(gmi).begin(), arg.begin());
-                dataset_.unlockModel(gmi);
-
-                // update weights
-                const double wChange =updateWeights();
-
-                if(iteration_%(nModels*2) == 0 ){
-                    std::cout << '\r'
-                              << std::setw(6) << std::setfill(' ') << iteration_ << ':'
-                              << std::setw(8) << dataset_. template getTotalLossParallel<INF>(para) <<"  "<< std::flush;
-
-                }
-
-            }
-        }
-        else if(para_.learningMode_ == Parameter::Batch){
-            //std::cout<<"batch mode\n";
-            for(iteration_=0 ; iteration_<para_.maxIterations_; ++iteration_){
-                // this 
-                
-
-                // reset the weights
-                featureAcc_.resetWeights();
-                double totalLoss = 0;
-
-                #ifdef WITH_OPENMP
-                omp_lock_t modelLockUnlock;
-                omp_init_lock(&modelLockUnlock);
-                omp_lock_t featureAccLock;
-                omp_init_lock(&featureAccLock);
-                #pragma omp parallel for reduction(+:totalLoss)  
-                #endif
-                for(size_t gmi=0; gmi<nModels; ++gmi){
-                    
-                    // lock the model
-                    #ifdef WITH_OPENMP
-                    omp_set_lock(&modelLockUnlock);
-                    dataset_.lockModel(gmi);     
-                    omp_unset_lock(&modelLockUnlock);
-                    #else
-                    dataset_.lockModel(gmi);     
-                    #endif
-                        
-                    
-
-                    const GMWITHLOSS & gmWithLoss = dataset_.getModelWithLoss(gmi);
-                    const GMType     & gm = dataset_.getModel(gmi);
-                    //run inference
-                    std::vector<LabelType> arg;
-                    opengm::infer<InfLossGm>(gmWithLoss, infLossGmParam, arg);
-
-                    totalLoss = totalLoss + getLoss(gm, gmWithLoss, arg);
-
-             
-                    if(useWorkingSets){
-                        // append current solution
-                        buffer[gmi].push_back(arg);
-
-                        size_t vCount=0;
-                        // check which violates
-                        for(size_t cc=0; cc<buffer[gmi].size(); ++cc){
-                            const double mLoss = dataset_.getLoss(buffer[gmi][cc], gmi);
-                            const double argVal = gm.evaluate(buffer[gmi][cc]);
-                            const double gtVal =  gm.evaluate(dataset_.getGT(gmi));
-                            const double ll = (argVal - mLoss) - gtVal;
-                            //std::cout<<" argVal "<<argVal<<" gtVal "<<gtVal<<" mLoss "<<mLoss<<"   VV "<<ll<<"\n";
-                            if(ll<0){
-                                isViolated[cc] = true;
-                                ++vCount;
-                            }
-                        }
-                        FeatureAcc featureAcc(nWegihts);
-                        for(size_t cc=0; cc<buffer[gmi].size(); ++cc){
-                            if(isViolated[cc]){
-
-                                featureAcc.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), buffer[gmi][cc].begin(),1.0/double(vCount));
-
-                            }
-                        }
-                        #ifdef WITH_OPENMP
-                        omp_set_lock(&featureAccLock);
-                        featureAcc_.accumulateFromOther(featureAcc);
-                        omp_unset_lock(&featureAccLock);
-                        #else
-                        featureAcc_.accumulateFromOther(featureAcc);
-                        #endif
-                    }
-                    else{
-                        FeatureAcc featureAcc(nWegihts);
-                        featureAcc.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), arg.begin());
-                        #ifdef WITH_OPENMP
-                        omp_set_lock(&featureAccLock);
-                        featureAcc_.accumulateFromOther(featureAcc);
-                        omp_unset_lock(&featureAccLock);
-                        #else
-                        featureAcc_.accumulateFromOther(featureAcc);
-                        #endif
-                    }
-
-
-
-                    // acc features
-                    //omp_set_lock(&featureAccLock);
-                    //featureAcc_.accumulateFromOther(featureAcc);
-                    //omp_unset_lock(&featureAccLock);
-
-                    // unlock the model
-                    #ifdef WITH_OPENMP
-                    omp_set_lock(&modelLockUnlock);
-                    dataset_.unlockModel(gmi);     
-                    omp_unset_lock(&modelLockUnlock);
-                    #else
-                    dataset_.unlockModel(gmi);     
-                    #endif
-
-
-                }
-
-                //const double wRegVal = wReg_(dataset_.getWeights());
-                //const double tObj = std::abs(totalLoss) + wRegVal;
-                // print the (negated) total loss once per iteration
-                std::cout << '\r'
-                          << std::setw(6) << std::setfill(' ') << iteration_ << ':'
-                          << std::setw(8) << -1.0*totalLoss <<"  "<< std::flush;
-                // update the weights
-                const double wChange =updateWeights();
-                
-            }
-        }
-        weights_ = dataset_.getWeights();
-    }
-
-
-    template<class DATASET>
-    double SubgradientSSVM<DATASET>::updateWeights(){
-
-        const size_t nWegihts = dataset_.getNumberOfWeights();
-
-        WeightsType p(nWegihts);
-        WeightsType newWeights(nWegihts);
-
-        if(para_.learningMode_ == Parameter::Batch){
-            for(size_t wi=0; wi<nWegihts; ++wi){
-                p[wi] =  dataset_.getWeights().getWeight(wi);
-                p[wi] += para_.C_ * featureAcc_.getWeight(wi)/double(dataset_.getNumberOfModels());
-            }
-        }
-        else{
-            for(size_t wi=0; wi<nWegihts; ++wi){
-                p[wi] =  dataset_.getWeights().getWeight(wi);
-                p[wi] += para_.C_ * featureAcc_.getWeight(wi);
-            }
-        }
-
-
-        double wChange = 0.0;
-
-        for(size_t wi=0; wi<nWegihts; ++wi){
-            const double wOld = dataset_.getWeights().getWeight(wi);
-            const double wNew = wOld - (para_.learningRate_/double(iteration_+1))*p[wi];
-            wChange += std::pow(wOld-wNew,2);
-            newWeights[wi] = wNew;
-        }
-
-        weightAveraging_(newWeights);
-
-
-
-        weights_ = dataset_.getWeights();
-        return wChange;
-    }
-}
-}
-#endif
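And a matching sketch for the subgradient learner (placeholders as before):

    typedef opengm::learning::SubgradientSSVM<DatasetType> Learner;

    Learner::Parameter param;
    param.C_            = 10.0;  // scales the feature-gradient step in updateWeights()
    param.learningRate_ = 0.1;   // decayed as learningRate_/(t+1)
    param.nConf_        = 5;     // working set: keep the 5 most recent solutions per model
    param.learningMode_ = Learner::Parameter::Batch;

    Learner learner(dataset, param);
    learner.learn<InfType>(infParameter);
    const opengm::learning::Weights<double>& w = learner.getWeights();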
diff --git a/include/opengm/learning/weight_averaging.hxx b/include/opengm/learning/weight_averaging.hxx
deleted file mode 100644
index 815ab4b..0000000
--- a/include/opengm/learning/weight_averaging.hxx
+++ /dev/null
@@ -1,68 +0,0 @@
-#ifndef OPENGM_LEARNING_WEIGHT_AVERAGING_HXX
-#define OPENGM_LEARNING_WEIGHT_AVERAGING_HXX
-
-#include <opengm/opengm.hxx>
-#include <opengm/graphicalmodel/weights.hxx>
-
-namespace opengm{
-namespace learning{
-
-
-    template<class T>
-    class WeightAveraging{
-    public:
-        WeightAveraging(Weights<T> & weights, int order=2)
-        :   weights_(&weights),
-            order_(order),
-            iteration_(1){
-        }
-        WeightAveraging()
-        :   weights_(NULL),
-            order_(2),
-            iteration_(1){
-        }
-        void setWeights(Weights<T> & weights){
-            weights_ = &weights;
-        }
-
-        template<class U>
-        void operator()(const Weights<U> & weights){
-            const T t = static_cast<T>(iteration_);
-            if(order_ == -1){
-                *weights_ = weights;
-            }
-            else if(order_ == 0){
-                throw opengm::RuntimeError("running average is not yet implemented");
-            }
-            else if(order_==1){
-                const T rho = 2.0 / (t + 2.0);
-                for(size_t i=0; i<weights_->size(); ++i){
-                    (*weights_)[i] =  (*weights_)[i]*(1.0 - rho) + weights[i]*rho;
-                }
-            }
-            else if(order_ == 2){
-                const T rho = 6.0 * (t+1.0) / ( (t+2.0)*(2.0*t + 3.0) );
-                for(size_t i=0; i<weights_->size(); ++i){
-                    (*weights_)[i] =  (*weights_)[i]*(1.0 - rho) + weights[i]*rho;
-                }
-            }
-            else{
-                throw opengm::RuntimeError("order must be -1,0,1 or 2");
-            }
-            ++iteration_;
-        }
-        const Weights<T> & weights()const{
-            return *weights_;
-        }
-    private:
-        Weights<T>  * weights_;
-        int order_;
-        size_t iteration_;
-    };
-
-
-
-}   // end namespace learning
-}   // end namespace opengm
-
-
-#endif /*OPENGM_LEARNING_WEIGHT_AVERAGING_HXX*/
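Finally, a small sketch of the averaging helper above; nWeights, nSteps and newWeights are placeholders:

    opengm::learning::Weights<double> averaged(nWeights);
    opengm::learning::WeightAveraging<double> avg(averaged, 2);  // order-2 averaging

    for (size_t t = 0; t < nSteps; ++t) {
        // ... compute newWeights for this iteration ...
        avg(newWeights);  // 'averaged' now holds the order-2 running average
    }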
diff --git a/include/opengm/python/numpyview.hxx b/include/opengm/python/numpyview.hxx
index 1be7c44..bf0eb21 100644
--- a/include/opengm/python/numpyview.hxx
+++ b/include/opengm/python/numpyview.hxx
@@ -31,8 +31,7 @@ public:
    typedef typename marray::View< V ,false >::const_iterator ConstIteratorType;
    typedef size_t const *  ShapeIteratorType;
    
-   NumpyView():allocFromCpp_(false){
-
+   NumpyView( ):allocFromCpp_(false){
    }
    NumpyView( boost::python::object  obj):allocFromCpp_(false){
       boost::python::numeric::array array = boost::python::extract<boost::python::numeric::array > (obj);
@@ -175,25 +174,6 @@ public:
       return view_.end();   
    }
 
-   marray::View< V, false > getSliceView(size_t dimension, size_t sliceIndex) {
-      // create base coordinate
-      std::vector<size_t> baseIterator(view_.dimension(), 0);
-      baseIterator[dimension] = sliceIndex;
-
-      // create expected shape
-      std::vector<size_t> shapeIterator(view_.shapeBegin(), view_.shapeEnd());
-      shapeIterator[dimension] = 1;
-
-      // return sub view of slice with reduced dimensions
-      marray::View< V, false > new_view = view_.view(baseIterator.begin(), shapeIterator.begin());
-      new_view.squeeze();
-      return new_view;
-   }
-
-   marray::View< V ,false > view()const{
-        return view_;
-   }
-
    //boost::python::object arrayObject()const{
    //   return arrayObj_;
    //};
diff --git a/include/opengm/python/opengmpython.hxx b/include/opengm/python/opengmpython.hxx
index e579e6d..dc7588c 100644
--- a/include/opengm/python/opengmpython.hxx
+++ b/include/opengm/python/opengmpython.hxx
@@ -18,19 +18,11 @@
 #include "opengm/functions/truncated_squared_difference.hxx"
 #include "opengm/functions/sparsemarray.hxx"
 
-#include "opengm/functions/learnable/lpotts.hxx"
-#include "opengm/functions/learnable/lunary.hxx"
-
 #include <opengm/python/opengmpython.hxx>
 #include <opengm/python/converter.hxx>
 #include <opengm/python/numpyview.hxx>
 #include <opengm/python/pythonfunction.hxx>
 
-#include <opengm/learning/dataset/editabledataset.hxx>
-#include <opengm/learning/loss/hammingloss.hxx>
-#include <opengm/learning/loss/generalized-hammingloss.hxx>
-#include <opengm/learning/loss/noloss.hxx>
-#include <opengm/learning/loss/flexibleloss.hxx>
 
 #include <algorithm>
 #include <vector>
@@ -53,34 +45,31 @@ namespace python{
       typedef opengm::ExplicitFunction<V ,I,I> type;
    };
 
-    template<class V,class I>
-    struct FTLGen{
-
-        typedef V ValueType;
-        typedef I IndexType;
-        typedef I LabelType;
-        typedef opengm::ExplicitFunction                      <ValueType,IndexType,LabelType> PyExplicitFunction;
-        typedef opengm::PottsFunction                         <ValueType,IndexType,LabelType> PyPottsFunction;
-        typedef opengm::PottsNFunction                        <ValueType,IndexType,LabelType> PyPottsNFunction;
-        typedef opengm::PottsGFunction                        <ValueType,IndexType,LabelType> PyPottsGFunction;
-        typedef opengm::TruncatedAbsoluteDifferenceFunction   <ValueType,IndexType,LabelType> PyTruncatedAbsoluteDifferenceFunction;
-        typedef opengm::TruncatedSquaredDifferenceFunction    <ValueType,IndexType,LabelType> PyTruncatedSquaredDifferenceFunction;
-        typedef opengm::SparseFunction                        <ValueType,IndexType,LabelType> PySparseFunction; 
-        typedef opengm::functions::learnable::LPotts          <ValueType,IndexType,LabelType> PyLPottsFunction;
-        typedef opengm::functions::learnable::LUnary          <ValueType,IndexType,LabelType> PyLUnaryFunction;
-
-
-        typedef typename opengm::meta::TypeListGenerator<
-            PyExplicitFunction,
-            PyPottsFunction,
-            PyPottsNFunction,
-            PyPottsGFunction,
-            PyTruncatedAbsoluteDifferenceFunction,
-            PyTruncatedSquaredDifferenceFunction,
-            PySparseFunction,
-            PyLPottsFunction,
-            PyLUnaryFunction
-        >::type type;
+   template<class V,class I>
+   struct FTLGen{
+
+      typedef V ValueType;
+      typedef I IndexType;
+      typedef I LabelType;
+      typedef opengm::ExplicitFunction                      <ValueType,IndexType,LabelType> PyExplicitFunction;
+      typedef opengm::PottsFunction                         <ValueType,IndexType,LabelType> PyPottsFunction;
+      typedef opengm::PottsNFunction                        <ValueType,IndexType,LabelType> PyPottsNFunction;
+      typedef opengm::PottsGFunction                        <ValueType,IndexType,LabelType> PyPottsGFunction;
+      typedef opengm::TruncatedAbsoluteDifferenceFunction   <ValueType,IndexType,LabelType> PyTruncatedAbsoluteDifferenceFunction;
+      typedef opengm::TruncatedSquaredDifferenceFunction    <ValueType,IndexType,LabelType> PyTruncatedSquaredDifferenceFunction;
+      typedef opengm::SparseFunction                        <ValueType,IndexType,LabelType> PySparseFunction; 
+      typedef PythonFunction                                <ValueType,IndexType,LabelType> PyPythonFunction; 
+
+      typedef typename opengm::meta::TypeListGenerator<
+         PyExplicitFunction,
+         PyPottsFunction,
+         PyPottsNFunction,
+         PyPottsGFunction,
+         PyTruncatedAbsoluteDifferenceFunction,
+         PyTruncatedSquaredDifferenceFunction,
+         PySparseFunction,
+         PyPythonFunction
+      >::type type;
    };
 
 
@@ -90,8 +79,6 @@ namespace python{
    typedef GmIndexType GmLabelType;
 
 
-   typedef opengm::learning::Weights<GmValueType> PyWeights;
-
 
    // different function types
    typedef opengm::ExplicitFunction                      <GmValueType,GmIndexType,GmLabelType> GmExplicitFunction;
@@ -103,9 +90,8 @@ namespace python{
    typedef opengm::SquaredDifferenceFunction             <GmValueType,GmIndexType,GmLabelType> GmSquaredDifferenceFunction;
    typedef opengm::TruncatedSquaredDifferenceFunction    <GmValueType,GmIndexType,GmLabelType> GmTruncatedSquaredDifferenceFunction;
    typedef opengm::SparseFunction                        <GmValueType,GmIndexType,GmLabelType> GmSparseFunction; 
-   typedef opengm::functions::learnable::LPotts          <GmValueType,GmIndexType,GmLabelType> PyLPottsFunction;
-   typedef opengm::functions::learnable::LUnary          <GmValueType,GmIndexType,GmLabelType> PyLUnaryFunction;
-   
+   typedef opengm::python::PythonFunction                <GmValueType,GmIndexType,GmLabelType> GmPythonFunction; 
+
    typedef std::vector<GmIndexType> IndexVectorType;
    typedef std::vector<IndexVectorType> IndexVectorVectorType;
 
@@ -116,11 +102,6 @@ namespace python{
       FTLGen<GmValueType,GmIndexType>::type
    >::type   GmAdder;
 
-
-   typedef opengm::datasets::EditableDataset<GmAdder, opengm::learning::HammingLoss > GmAdderHammingLossDataset;
-   typedef opengm::datasets::EditableDataset<GmAdder, opengm::learning::GeneralizedHammingLoss > GmAdderGeneralizedHammingLossDataset;
-   typedef opengm::datasets::EditableDataset<GmAdder, opengm::learning::FlexibleLoss > GmAdderFlexibleLossDataset;
-
    typedef GmAdder::FactorType FactorGmAdder;
    typedef FactorGmAdder GmAdderFactor;
 
diff --git a/include/opengm/utilities/metaprogramming.hxx b/include/opengm/utilities/metaprogramming.hxx
index 7ba8d19..af8b56a 100644
--- a/include/opengm/utilities/metaprogramming.hxx
+++ b/include/opengm/utilities/metaprogramming.hxx
@@ -662,16 +662,7 @@ namespace opengm {
          };
          typedef HasTypeInTypeList< TTAIL,TypeToFind>  type;
       };
-
-      /// metaprogramming has type in typelist metafunction     
-      template<class THEAD,class TTAIL>
-      struct HasTypeInTypeList<meta::TypeList<THEAD,TTAIL>,THEAD > : meta::TrueCase{
-      };
-      /// metaprogramming has type in typelist metafunction    
-      template<class TypeToFindx>
-      struct HasTypeInTypeList<meta::ListEnd,TypeToFindx> : meta::FalseCase{
-      };
-
+      
       /// metaprogramming find type with a certain size in typelist metafunction     
       template<class TL,class TSL,size_t SIZE,class NOT_FOUND>
       struct FindSizedType;
@@ -708,70 +699,14 @@ namespace opengm {
 		{
          typedef OTHER_TL type;
       };
-
-
-
-
-
-        template<class TL, class RES_TL>
-        struct RemoveDuplicates;
-
-
-
-        // entry point
-        template<class TL>
-        struct RemoveDuplicates<TL, meta::ListEnd>{
-            
-
-            // get the first type from tl 
-            typedef typename TL::HeadType FirstEntry;
-            // rest of type list
-            typedef typename TL::TailType RestOfList;
-
-            typedef typename RemoveDuplicates<
-                RestOfList,
-                meta::TypeList<FirstEntry, meta::ListEnd>
-            >::type type;
-        };
-
-
-
-        template<class RES_TL>
-        struct RemoveDuplicates<meta::ListEnd,  RES_TL>{
-            typedef RES_TL type;
-        };
-
-        template<class TL, class RES_TL>
-        struct RemoveDuplicates{
-
-            // get the first type from tl 
-            typedef typename TL::HeadType FirstEntry;
-            // rest of type list
-            typedef typename TL::TailType RestOfList;
-
-
-            typedef typename meta::EvalIf<
-                meta::HasTypeInTypeList<RES_TL, FirstEntry>::value,
-                meta::Self<RES_TL>,
-                meta::BackInsert<RES_TL, FirstEntry>
-            >::type ResultTypeList;
-                
-            typedef typename RemoveDuplicates<
-                RestOfList,
-                ResultTypeList
-            >::type type;
-        };
-
-
-
-        template<class TL,class OTHER_TL>
-        struct MergeTypeListsNoDuplicates{
-            typedef typename MergeTypeLists<TL, OTHER_TL>::type WithDuplicates;
-            typedef typename RemoveDuplicates<WithDuplicates, ListEnd>::type type;
-        };
-
-
-
+      /// metaprogramming has type in typelist metafunction     
+      template<class THEAD,class TTAIL>
+      struct HasTypeInTypeList<meta::TypeList<THEAD,TTAIL>,THEAD > : meta::TrueCase{
+      };
+      /// metaprogramming has type in typelist metafunction    
+      template<class TypeToFindx>
+      struct HasTypeInTypeList<meta::ListEnd,TypeToFindx> : meta::FalseCase{
+      };
       /// metaprogramming inserts a type in typelist or move to end metafunction   
       ///
       /// back inserts a type in a typelist. If the type has been in the typelist
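
The RemoveDuplicates and MergeTypeListsNoDuplicates metafunctions removed
above perform an order-preserving, keep-first deduplication of a typelist at
compile time. The run-time analogue, sketched in Python (function names are
illustrative):

    def remove_duplicates(type_list):
        # Keep the first occurrence of each type, preserving order,
        # like the removed RemoveDuplicates metafunction.
        result = []
        for t in type_list:
            if t not in result:      # HasTypeInTypeList
                result.append(t)     # BackInsert
        return result

    def merge_no_duplicates(a, b):
        # Analogue of the removed MergeTypeListsNoDuplicates.
        return remove_duplicates(a + b)

    print(merge_no_duplicates([int, float], [float, str]))   # [int, float, str]
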
diff --git a/include/opengm/utilities/shape_accessor.hxx b/include/opengm/utilities/shape_accessor.hxx
index 6d9b45e..5541832 100644
--- a/include/opengm/utilities/shape_accessor.hxx
+++ b/include/opengm/utilities/shape_accessor.hxx
@@ -61,88 +61,12 @@ namespace opengm {
       const value_type operator[](const size_t j) const 
          { return factor_->numberOfLabels(j); }
       bool operator==(const FactorShapeAccessor<FACTOR> & other) const 
-         { return factor_ == other.factor_;  }
+         { return factor_ == other.factor_; }
    
    private:
       factor_pointer factor_;
    };
    
-
-
-   template<class SUBSET_ITERATOR, class GM_LABEL_ITER>
-   class SubsetAccessor {
-   public:
-      typedef typename std::iterator_traits<GM_LABEL_ITER>::value_type value_type;
-
-      typedef const value_type reference;
-      typedef const value_type* pointer;
-
-      SubsetAccessor()
-         :  sBegin_(),
-            sEnd_(),
-            gmLabelIter_()
-         {}
-      SubsetAccessor(SUBSET_ITERATOR sBegin, SUBSET_ITERATOR sEnd , GM_LABEL_ITER iter)
-         :  sBegin_(sBegin),
-            sEnd_(sEnd),
-            gmLabelIter_(iter)
-         {}
-      size_t size() const 
-         { return std::distance(sBegin_, sEnd_); }
-      reference operator[](const size_t j) 
-         { return gmLabelIter_[sBegin_[j]]; }
-      const value_type operator[](const size_t j) const 
-         { return gmLabelIter_[sBegin_[j]];  }
-      bool operator==(const SubsetAccessor  & other) const 
-      { 
-        return sBegin_ == other.sBegin_ && 
-               sEnd_ == other.sEnd_ && 
-               gmLabelIter_==other.gmLabelIter_; 
-      }
-   
-   private:
-      SUBSET_ITERATOR sBegin_;
-      SUBSET_ITERATOR sEnd_;
-      GM_LABEL_ITER gmLabelIter_;
-   };
-
-
-
-   template<class FACTOR, class GM_LABEL_ITER>
-   class GmLabelFactorLabelAccessor {
-   public:
-      typedef typename std::iterator_traits<GM_LABEL_ITER>::value_type value_type;
-
-      typedef const value_type reference;
-      typedef const value_type* pointer;
-      typedef const FACTOR& factor_reference;
-      typedef const FACTOR* factor_pointer;
-
-      GmLabelFactorLabelAccessor()
-         :  factor_(NULL),
-            gmLabelIter_()
-         {}
-      GmLabelFactorLabelAccessor(factor_reference f , GM_LABEL_ITER iter)
-         :  factor_(&f),
-            gmLabelIter_(iter)
-         {}
-      size_t size() const 
-         { return factor_ == 0 ? 0 : factor_->numberOfVariables(); }
-      reference operator[](const size_t j) 
-         { return gmLabelIter_[factor_->variableIndex(j)]; }
-      const value_type operator[](const size_t j) const 
-         { return gmLabelIter_[factor_->variableIndex(j)]; }
-      bool operator==(const GmLabelFactorLabelAccessor<FACTOR, GM_LABEL_ITER> & other) const 
-      { return factor_ == other.factor_ && gmLabelIter_==other.gmLabelIter_; 
-      }
-   
-   private:
-      factor_pointer factor_;
-      GM_LABEL_ITER gmLabelIter_;
-   };
-
-
-
    template<class FACTOR>
    class FactorVariablesAccessor {
    public:
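
The SubsetAccessor removed above presents a full labeling, restricted to a
subset of variable indices, as a small random-access sequence, and
GmLabelFactorLabelAccessor does the same through a factor's variable indices.
A run-time sketch of the idea in Python (names illustrative):

    class SubsetAccessor(object):
        # Expose labels[subset[j]] as a sequence of length len(subset),
        # as the removed C++ accessor does via iterators.
        def __init__(self, subset, labels):
            self.subset = subset   # variable indices of the subset
            self.labels = labels   # labeling of the full model
        def __len__(self):
            return len(self.subset)
        def __getitem__(self, j):
            return self.labels[self.subset[j]]

    labels = [0, 2, 1, 3]
    print(list(SubsetAccessor([3, 1], labels)))   # [3, 2]
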
diff --git a/src/examples/unsorted-examples/inference_types.cxx b/src/examples/unsorted-examples/inference_types.cxx
index dcc4b98..3ae18e5 100644
--- a/src/examples/unsorted-examples/inference_types.cxx
+++ b/src/examples/unsorted-examples/inference_types.cxx
@@ -34,20 +34,19 @@ int main() {
       // ....
       // assume starting point is filled with labels
       OptimizerMinimizerParameterType minimizerParameter(
-         OptimizerMinimizerType::SINGLE_VARIABLE  // flip a single variable (FACTOR for flip all var. a factor depends on)
+         OptimizerMinimizerType::SINGLE_VARIABLE,  // flip a single variable (FACTOR flips all variables a factor depends on)
+         startingPoint
       );
       // without starting point
       OptimizerMaximizerParameterType maximizerParameter(
-         OptimizerMaximizerType::FACTOR  // flip a single variable (FACTOR for flip all var. a factor depends on)
+         OptimizerMaximizerType::FACTOR,  // flip all variables a factor depends on (SINGLE_VARIABLE flips one variable)
+         startingPoint
       );
       
       // construct optimizers ( minimizer and maximizer )
       OptimizerMinimizerType optimizerMinimizer(gm,minimizerParameter);
       OptimizerMaximizerType optimizerMaximizer(gm,maximizerParameter);
       
-      optimizerMinimizer.setStartingPoint(startingPoint.begin());
-      optimizerMaximizer.setStartingPoint(startingPoint.begin());
-
       // optimize the models ( minimizer and maximize )
       optimizerMinimizer.infer();
       optimizerMaximizer.infer();
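
After the revert, the starting labeling travels inside the parameter object
instead of being set on the constructed optimizer. The difference in call
pattern, sketched with hypothetical Python classes (these are not the OpenGM
bindings; they only illustrate the two styles):

    # Hypothetical classes, for illustration only.
    class Parameter(object):
        def __init__(self, move_type, starting_point):
            self.move_type = move_type
            self.starting_point = list(starting_point)

    class Optimizer(object):
        def __init__(self, gm, parameter):
            # style after the revert: labeling arrives with the parameter
            self.gm = gm
            self.labels = list(parameter.starting_point)
        def set_starting_point(self, labels):
            # style before the revert: set after construction
            self.labels = list(labels)

    opt = Optimizer(gm=None, parameter=Parameter('FACTOR', [0, 0, 1]))
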
diff --git a/src/interfaces/commandline/double/opengm_min_sum.cxx b/src/interfaces/commandline/double/opengm_min_sum.cxx
index 40f47a4..832e601 100644
--- a/src/interfaces/commandline/double/opengm_min_sum.cxx
+++ b/src/interfaces/commandline/double/opengm_min_sum.cxx
@@ -35,6 +35,11 @@
 #include "../../common/caller/trws_caller.hxx"
 #endif
 
+#if (defined(WITH_MAXFLOW) )
+#include "../../common/caller/lsatr_caller.hxx"
+#endif
+
+
 #if (defined(WITH_MAXFLOW) || defined(WITH_BOOST))
 #include "../../common/caller/graphcut_caller.hxx"
 #include "../../common/caller/alphaexpansion_caller.hxx"
@@ -42,10 +47,6 @@
 #include "../../common/caller/qpbo_caller.hxx"
 #endif
 
-#if (defined(WITH_MAXFLOW) )
-#include "../../common/caller/lsatr_caller.hxx"
-#endif
-
 #ifdef WITH_QPBO
 #include "../../common/caller/mqpbo_caller.hxx"
 #ifdef WITH_BOOST
diff --git a/src/interfaces/python/opengm/CMakeLists.txt b/src/interfaces/python/opengm/CMakeLists.txt
index 4511e23..4c18034 100644
--- a/src/interfaces/python/opengm/CMakeLists.txt
+++ b/src/interfaces/python/opengm/CMakeLists.txt
@@ -37,7 +37,6 @@ include_directories(
 
 add_subdirectory(opengmcore)
 add_subdirectory(inference)
-add_subdirectory(learning)
 add_subdirectory(hdf5)
 add_subdirectory(benchmark)
 
diff --git a/src/interfaces/python/opengm/__init__.py b/src/interfaces/python/opengm/__init__.py
index a479397..8258c93 100644
--- a/src/interfaces/python/opengm/__init__.py
+++ b/src/interfaces/python/opengm/__init__.py
@@ -14,7 +14,7 @@ from _inference_interface_generator import _inject_interface , InferenceBase
 import inference
 import hdf5
 import benchmark
-from _to_native_converter import to_native_boost_python_enum_converter
+
 # initialize solver/ inference dictionaries
 _solverDicts=[
    (inference.adder.minimizer.solver.__dict__ ,     'adder',       'minimizer' ),
diff --git a/src/interfaces/python/opengm/_inference_interface_generator.py b/src/interfaces/python/opengm/_inference_interface_generator.py
index 15aa072..887d26a 100644
--- a/src/interfaces/python/opengm/_inference_interface_generator.py
+++ b/src/interfaces/python/opengm/_inference_interface_generator.py
@@ -211,60 +211,6 @@ def classGenerator(
 
         self.inference = self._selectedInfClass(self.gm, self.parameter)
 
-    @classmethod
-    def get_cpp_parameter(cls, operator, accumulator, parameter):
-        _meta_parameter = parameter
-        # get hyper parameter (as minStCut for graphcut, or the subsolver for
-        # dualdec.)
-        hyperParamKeywords = inferenceClasses.hyperParameterKeywords
-        numHyperParams = len(hyperParamKeywords)
-        userHyperParams = [None]*numHyperParams
-        collectedHyperParameters = 0
-        # get the users hyper parameter ( if given)
-
-        if(_meta_parameter is not None):
-            for hpIndex, hyperParamKeyword in enumerate(hyperParamKeywords):
-                if hyperParamKeyword in _meta_parameter.kwargs:
-                    userHyperParams[hpIndex] = _meta_parameter.kwargs.pop(
-                        hyperParamKeyword)
-                    collectedHyperParameters += 1
-
-            # check if ZERO or ALL hyperParameters have been collected
-            if collectedHyperParameters != 0 and collectedHyperParameters != numHyperParams:
-                raise RuntimeError("either all or none of the hyper-parameters must be given")
-
-        # check if the WHOLE tuple of hyperParameters is allowed
-        if collectedHyperParameters != 0:
-            if tuple(str(x) for x in userHyperParams) not in inferenceClasses.implDict:
-                raise RuntimeError("%s is not an allowed hyperParameter\nAllowed hyperParameters are %s" % (
-                    repr(userHyperParams), repr(inferenceClasses.implDict.keys())))
-        else:
-            userHyperParams = defaultHyperParams
-
-        #try:
-        # get the selected inference class and the parameter
-        if(numHyperParams == 0):
-            
-            _selectedInfClass, _selectedInfParamClass = inferenceClasses.implDict[
-                    "__NONE__"][(operator, accumulator)]
-        else:
-            hp = tuple(str(x) for x in userHyperParams)
-            _selectedInfClass, _selectedInfParamClass = inferenceClasses.implDict[
-                hp][(operator, accumulator)]
-        #except:
-        #    dictStr=str(inferenceClasses.implDict)
-        #    raise RuntimeError("given semiring (operator = %s, accumulator = %s) is not implemented for this solver\n %s" % \
-        #        (operator, accumulator,dictStr))
-
-        if _meta_parameter is None:
-            cppParam = _selectedInfClass._parameter()
-            cppParam.set()
-        else:
-            cppParam = to_native_class_converter(
-                givenValue=_meta_parameter, nativeClass=_selectedInfParamClass)
-            assert cppParam is not None
-
-        return cppParam
     def verboseVisitor(self, printNth=1, multiline=True):
         """ factory function to get a verboseVisitor:
 
@@ -626,11 +572,6 @@ def classGenerator(
     infClass = type(classname, (InferenceBase,), memberDict)
 
     infClass.__init__ = inference_init
-
-
-    infClass.get_cpp_parameter = get_cpp_parameter
-
-
     # print to string!!!
     old_stdout = sys.stdout
     sys.stdout = mystdout = StringIO()
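
The removed get_cpp_parameter collects optional hyper-parameters (such as the
minStCut backend for graph cut) from the user's keyword arguments on an
all-or-none basis and uses them as the lookup key into the implementation
dictionary. A condensed sketch of that selection logic (the real implDict
additionally keys on the (operator, accumulator) pair; the names below are
simplified stand-ins):

    def select_impl(impl_dict, hyper_keywords, defaults, kwargs):
        # Pop any hyper-parameters the user supplied.
        given = [kwargs.pop(k) for k in hyper_keywords if k in kwargs]
        # All-or-none rule, as in the removed code.
        if given and len(given) != len(hyper_keywords):
            raise RuntimeError("either all or none of the hyper-parameters must be given")
        if not hyper_keywords:
            return impl_dict["__NONE__"]
        key = tuple(str(x) for x in (given or defaults))
        if key not in impl_dict:
            raise RuntimeError("%r is not an allowed hyper-parameter" % (key,))
        return impl_dict[key]

    impls = {"__NONE__": "Icm",
             ("kolmogorov",): "GraphCut<Kolmogorov>",
             ("push-relabel",): "GraphCut<PushRelabel>"}
    print(select_impl(impls, ["minStCut"], ["kolmogorov"], {}))
    print(select_impl(impls, ["minStCut"], ["kolmogorov"], {"minStCut": "push-relabel"}))
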
diff --git a/src/interfaces/python/opengm/functionhelper.py b/src/interfaces/python/opengm/functionhelper.py
index c8833e8..cf424cb 100644
--- a/src/interfaces/python/opengm/functionhelper.py
+++ b/src/interfaces/python/opengm/functionhelper.py
@@ -4,12 +4,12 @@ from opengmcore._opengmcore import (SparseFunction,
                                     TruncatedAbsoluteDifferenceFunction,
                                     TruncatedSquaredDifferenceFunction,
                                     PottsFunction, PottsNFunction,
-                                    PottsGFunction ,
+                                    PottsGFunction , PythonFunction,
                                     SparseFunctionVector, 
                                     TruncatedAbsoluteDifferenceFunctionVector,
                                     TruncatedSquaredDifferenceFunctionVector,
                                     PottsFunctionVector, PottsNFunctionVector,
-                                    PottsGFunctionVector ,
+                                    PottsGFunctionVector , PythonFunctionVector,
                                     
                                     )
 
diff --git a/src/interfaces/python/opengm/inference/inf_def_visitor.hxx b/src/interfaces/python/opengm/inference/inf_def_visitor.hxx
index 87cb5e0..dd2b051 100644
--- a/src/interfaces/python/opengm/inference/inf_def_visitor.hxx
+++ b/src/interfaces/python/opengm/inference/inf_def_visitor.hxx
@@ -141,7 +141,6 @@ public:
     template <class classT>
     void visit(classT& c) const{
         std::string className = std::string("_")+algName_;
-
         for(size_t hp=0;hp<infSetup_.hyperParameters.size();++hp){
             className+=std::string("_");
             className+=infSetup_.hyperParameters[hp];
diff --git a/src/interfaces/python/opengm/inference/pyFusionMoves.cxx b/src/interfaces/python/opengm/inference/pyFusionMoves.cxx
index b8ec9c1..e7debfe 100644
--- a/src/interfaces/python/opengm/inference/pyFusionMoves.cxx
+++ b/src/interfaces/python/opengm/inference/pyFusionMoves.cxx
@@ -123,10 +123,10 @@ private:
 			#endif 
 		}
 		else if(fusionSolver==std::string("lf2")){
-			return  fusionMover_. template fuse<LazyFlipperSubInf> (typename LazyFlipperSubInf::Parameter(size_t(2)),true);
+			return  fusionMover_. template fuse<LazyFlipperSubInf> (typename LazyFlipperSubInf::Parameter(2),true);
 		}
 		else if(fusionSolver==std::string("lf3")){
-			return  fusionMover_. template fuse<LazyFlipperSubInf> (typename LazyFlipperSubInf::Parameter(size_t(3)),true);
+			return  fusionMover_. template fuse<LazyFlipperSubInf> (typename LazyFlipperSubInf::Parameter(3),true);
 		}
 
         throw opengm::RuntimeError("unknown fusion solver");
diff --git a/src/interfaces/python/opengm/learning/CMakeLists.txt b/src/interfaces/python/opengm/learning/CMakeLists.txt
deleted file mode 100644
index a594ea4..0000000
--- a/src/interfaces/python/opengm/learning/CMakeLists.txt
+++ /dev/null
@@ -1,151 +0,0 @@
-#--------------------------------------------------------------
-# Include Directories
-#--------------------------------------------------------------
-find_package(NUMPY)
-include_directories(
-    ${CMAKE_CURRENT_SOURCE_DIR}
-	 ${PYTHON_INCLUDE_DIRS}
-	 ${PYTHON_NUMPY_INCLUDE_DIR}
-    ${Boost_INCLUDE_DIR}
-    ${Boost_PYTHON_INCLUDE_DIR}
-)
-
-
-
-
-
-#--------------------------------------------------------------
-# Add opengmcore library
-#--------------------------------------------------------------
-set(PY_OPENGM_CORE_SOURCES
-            learning.cxx
-            pyLFunctionGen.cxx
-            pyWeights.cxx
-            pyDataset.cxx
-            pyLoss.cxx
-            pyGridSearchLearner.cxx
-            #pyMaxLikelihoodLearner.cxx
-            pyStructMaxMarginLearner.cxx
-            pySubgradientSSVM.cxx
-            pyStructPerceptron.cxx
-            pyRws.cxx
-            )
-
-if(APPLE)
-    add_library(_learning MODULE ${PY_OPENGM_CORE_SOURCES})
-else()
-    add_library(_learning SHARED ${PY_OPENGM_CORE_SOURCES})
-endif(APPLE)
-
-
-#--------------------------------------------------------------
-# Link libraries
-#--------------------------------------------------------------
-if(OPENMP_FOUND)
-    SET_TARGET_PROPERTIES(_learning PROPERTIES COMPILE_FLAGS "${OpenMP_CXX_FLAGS}")
-    SET_TARGET_PROPERTIES(_learning PROPERTIES LINK_FLAGS "${OpenMP_CXX_FLAGS}")
-endif()
-
-if(MSVC AND NOT(MSVC_VERSION LESS 1400))
-    SET_PROPERTY(TARGET _learning APPEND_STRING PROPERTY COMPILE_FLAGS " /bigobj")
-endif()
-if(APPLE)
-    SET_PROPERTY(TARGET _learning APPEND_STRING PROPERTY LINK_FLAGS " -undefined dynamic_lookup")
-endif(APPLE)
-
-
-if(LINK_RT)
-    find_library(RT rt)
-    target_link_libraries(_learning ${Boost_PYTHON_LIBRARIES} rt)
-else()
-    target_link_libraries(_learning ${Boost_PYTHON_LIBRARIES})
-endif(LINK_RT)
-
-set_target_properties(_learning PROPERTIES PREFIX "")
-
-
-IF(WIN32)
-    SET_TARGET_PROPERTIES(_learning PROPERTIES OUTPUT_NAME "learning"  PREFIX "_" SUFFIX  ".pyd")
-ELSEIF(APPLE)
-    SET_TARGET_PROPERTIES(_learning PROPERTIES OUTPUT_NAME "learning" PREFIX "_" SUFFIX ".so")
-ELSE()
-    SET_TARGET_PROPERTIES(_learning PROPERTIES OUTPUT_NAME "learning"   PREFIX "_")
-ENDIF()
-
-target_link_libraries(_learning  ${HDF5_CORE_LIBRARY} ${HDF5_LIBRARIES} )
-
-
-
-if(WITH_LIBDAI)
-  target_link_libraries(_learning ${LIBDAI_LIBRARY})
-  target_link_libraries(_learning ${GMPXX_LIBRARY})
-  target_link_libraries(_learning ${GMP_LIBRARY}) 
-endif()
-
-if(WITH_QPBO)
-   target_link_libraries(_learning external-library-qpbo-shared)
-endif()
-
-if(WITH_MAXFLOW)
-   target_link_libraries(_learning external-library-maxflow-shared)
-endif()
-
-if(WITH_MAXFLOW_IBFS)
-  target_link_libraries(_learning external-library-maxflow-ibfs-shared)
-endif()
-
-if(WITH_TRWS)
-   target_link_libraries(_learning external-library-trws-shared)
-endif()
-
-
-if(WITH_FASTPD)
-   target_link_libraries(_learning external-library-fastpd-shared)
-endif()
-
-if(WITH_AD3)
-   target_link_libraries(_learning external-library-ad3-shared )
-endif()
-
-#SET(LINK_FLAGS "${LINK_FLAGS} -PIC")
-#SET_TARGET_PROPERTIES(_learning PROPERTIES LINK_FLAGS   "-fPIC")
-#add_definitions(-fPIC)
-
-if(WITH_CONICBUNDLE)
-  #target_link_libraries(_learning ${CONICBUNDLE_LIBRARY})
-endif()
-
-if(WITH_MRF)
-   target_link_libraries(_learning external-library-mrf-shared)
-endif()
-
-
-
-
-if(WITH_CPLEX)
-  if(WIN32)
-      target_link_libraries(_learning wsock32.lib ${CPLEX_ILOCPLEX_LIBRARY} ${CPLEX_LIBRARY} ${CPLEX_CONCERT_LIBRARY})
-   else()
-      target_link_libraries(_learning ${CMAKE_THREAD_LIBS_INIT} ${CPLEX_ILOCPLEX_LIBRARY} ${CPLEX_LIBRARY} ${CPLEX_CONCERT_LIBRARY} )
-    endif()
-endif()
-
-
-if(WITH_GUROBI)
-  target_link_libraries(_learning ${CMAKE_THREAD_LIBS_INIT} 
-    ${GUROBI_LIBRARIES}
-    #${GUOBI_CXX_LIBRARY}  
-    ${CMAKE_THREAD_LIBS_INIT}
-  )
-endif()
-
-#--------------------------------------------------------------
-# Copy from src to build
-#--------------------------------------------------------------
-
-if( ${CMAKE_CURRENT_SOURCE_DIR} STREQUAL  ${CMAKE_CURRENT_BINARY_DIR} )
-   message(STATUS "same src and build dir.")
-else()
-   message(STATUS "copy python-learning files  from src to build" )
-   file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/__init__.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR} )
-endif()
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
deleted file mode 100644
index f1e80d2..0000000
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ /dev/null
@@ -1,726 +0,0 @@
-from _learning import *
-from _learning import _lunarySharedFeatFunctionsGen,_lpottsFunctionsGen
-import numpy
-import struct
-from opengm import index_type,value_type, label_type, graphicalModel,gridVis
-from opengm import configuration as opengmConfig, LUnaryFunction
-from opengm import to_native_boost_python_enum_converter
-from opengm import Tribool
-#from progressbar import *
-from functools import partial
-
-
-def _extendedGetLoss(self, model_idx, infCls, parameter = None):
-    if parameter is None:
-        import opengm
-        parameter = opengm.InfParam()
-    cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
-    return self._getLoss(cppParam, model_idx)
-
-def _extendedGetTotalLoss(self, infCls, parameter = None):
-    if parameter is None:
-        import opengm
-        parameter = opengm.InfParam()
-    cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
-    return self._getTotalLoss(cppParam)
-
-
-
-
-
-
-DatasetWithFlexibleLoss.lossType = 'flexible'
-
-
-class LossParameter(FlexibleLossParameter):
-    def __init__(self, lossType, labelMult=None, nodeMult=None, factorMult=None):
-        super(LossParameter, self).__init__()
-
-        self.lossType = to_native_boost_python_enum_converter(lossType,self.lossType.__class__)
-
-        if labelMult is not None:
-            assert self.lossType == LossType.hamming
-            self.setLabelLossMultiplier(labelMult)
-        if nodeMult is not None:
-            assert self.lossType != LossType.partition
-            self.setNodeLossMultiplier(nodeMult)
-        if factorMult is not None:
-            assert self.lossType == LossType.partition
-            self.setFactorLossMultiplier(factorMult)
-
-
-
-def extend_learn():
-    
-    def learner_learn_normal(self, infCls, parameter = None):
-        if parameter is None:
-            import opengm
-            parameter = opengm.InfParam()
-        cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
-        try:
-          self._learn(cppParam)
-        except Exception, e:
-            #print "an error ",e,"\n\n"
-            if str(e).find("did not match C++ signature") != -1:
-                raise RuntimeError("infCls : '%s' is not (yet) exported from c++ to python for learning"%str(infCls))
-
-
-    def learner_learn_reduced_inf(self, infCls, parameter = None, persistency=True, tentacles=False, connectedComponents=False):
-        if parameter is None:
-            import opengm
-            parameter = opengm.InfParam()
-        cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
-        try:
-          self._learnReducedInf(cppParam, bool(persistency), bool(tentacles),bool(connectedComponents))
-        except Exception, e:
-            #print "an error ",e,"\n\n"
-            if str(e).find("did not match C++ signature") != -1:
-                raise RuntimeError("infCls : '%s' is not (yet) exported from c++ to python for learning with reduced inference"%str(infCls))
-
-    def learner_learn_reduced_inf_self_fusion(self, infCls, parameter = None, persistency=True, tentacles=False, connectedComponents=False):
-        if parameter is None:
-            import opengm
-            parameter = opengm.InfParam()
-        cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
-        try:
-          self._learnReducedInfSelfFusion(cppParam, bool(persistency), bool(tentacles),bool(connectedComponents))
-        except Exception, e:
-            #print "an error ",e,"\n\n"
-            if str(e).find("did not match C++ signature") != -1:
-                raise RuntimeError("infCls : '%s' is not (yet) exported from c++ to python for learning with reduced inference self fusion"%str(infCls))
-
-    def learner_learn_self_fusion(self, infCls, parameter = None, fuseNth=1, fusionSolver="qpbo",maxSubgraphSize=2,
-                                  redInf=True, connectedComponents=False, fusionTimeLimit=100.9, numStopIt=10):
-        if parameter is None:
-            import opengm
-            parameter = opengm.InfParam()
-        cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
-        try:
-          self._learnSelfFusion(cppParam, int(fuseNth),str(fusionSolver),int(maxSubgraphSize),bool(redInf),
-                                bool(connectedComponents),float(fusionTimeLimit),int(numStopIt))
-        except Exception, e:
-            #print "an error ",e,"\n\n"
-            if str(e).find("did not match C++ signature") != -1:
-                raise RuntimeError("infCls : '%s' is not (yet) exported from c++ to python for learning with self fusion inference"%str(infCls))
-
-    def learner_learn(self, infCls, parameter=None, infMode='normal',**kwargs):
-        assert infMode in ['normal','n','selfFusion','sf','reducedInference','ri','reducedInferenceSelfFusion','risf']
-
-        if infMode in ['normal','n']:
-            self.learnNormal(infCls=infCls, parameter=parameter)
-        elif infMode in ['selfFusion','sf']:
-            self.learnSelfFusion(infCls=infCls, parameter=parameter,**kwargs)
-        elif infMode in ['reducedInference','ri']:
-            self.learnReducedInf(infCls=infCls, parameter=parameter,**kwargs)
-        elif infMode in ['reducedInferenceSelfFusion','risf']:
-            self.learnReducedInfSelfFusion(infCls=infCls, parameter=parameter,**kwargs)
-
-    # all learner classes
-    learnerClss = [GridSearch_FlexibleLoss, StructPerceptron_FlexibleLoss,  
-                  SubgradientSSVM_FlexibleLoss, Rws_FlexibleLoss] 
-    if opengmConfig.withCplex or opengmConfig.withGurobi :
-        learnerClss.append(StructMaxMargin_Bundle_FlexibleLoss)
-
-    for learnerCls in learnerClss:
-        learnerCls.learn = learner_learn
-        learnerCls.learnNormal = learner_learn_normal
-        learnerCls.learnReducedInf = learner_learn_reduced_inf
-        learnerCls.learnSelfFusion = learner_learn_self_fusion
-        learnerCls.learnReducedInfSelfFusion = learner_learn_reduced_inf_self_fusion
-
-extend_learn()
-del extend_learn
-
-
-
-
-
-DatasetWithFlexibleLoss.getLoss = _extendedGetLoss
-DatasetWithFlexibleLoss.getTotalLoss = _extendedGetTotalLoss
-
-
-def createDataset(numWeights,  numInstances=0):
-    w  = Weights(numWeights)
-
-    # if loss not in ['hamming','h','gh','generalized-hamming']:
-    #     raise RuntimeError("loss must be 'hamming' /'h' or 'generalized-hamming'/'gh' ")    
-    # if loss in ['hamming','h']:
-    #     dataset = DatasetWithHammingLoss(int(numInstances))
-    # elif loss in ['generalized-hamming','gh']:
-    #     dataset = DatasetWithGeneralizedHammingLoss(int(numInstances))
-    # else:
-    #     raise RuntimeError("loss must be 'hamming' /'h' or 'generalized-hamming'/'gh' ")   
-    dataset = DatasetWithFlexibleLoss(numInstances)
-    dataset.setWeights(w)
-    weights = dataset.getWeights()
-    for wi in range(numWeights):
-        weights[wi] = 0.0
-    return dataset
-
-
-
-
-def gridSearchLearner(dataset, lowerBounds, upperBounds, nTestPoints):
-    assert dataset.__class__.lossType == 'flexible'
-    learnerCls = GridSearch_FlexibleLoss
-    learnerParamCls = GridSearch_FlexibleLossParameter
-
-    nr = numpy.require 
-    sizeT_type = 'uint64'
-
-    if struct.calcsize("P") * 8 == 32:
-        sizeT_type = 'uint32'
-
-    param = learnerParamCls(nr(lowerBounds,dtype='float64'), nr(upperBounds,dtype='float64'), 
-                           nr(nTestPoints,dtype=sizeT_type))
-
-    learner = learnerCls(dataset, param)
-    return learner
-
-
-def structPerceptron(dataset, learningMode='online',eps=1e-5, maxIterations=10000, stopLoss=0.0, decayExponent=0.0, decayT0=0.0):
-
-    assert dataset.__class__.lossType == 'flexible'
-    learnerCls = StructPerceptron_FlexibleLoss
-    learnerParamCls = StructPerceptron_FlexibleLossParameter
-    learningModeEnum = StructPerceptron_FlexibleLossParameter_LearningMode
-
-    lm = None
-    if learningMode not in ['online','batch']:
-        raise RuntimeError("wrong learning mode, must be 'online' or 'batch' ")
-
-    if learningMode == 'online':
-        lm = learningModeEnum.online
-    if learningMode == 'batch':
-        lm = learningModeEnum.batch
-
-    param = learnerParamCls()
-    param.eps = float(eps)
-    param.maxIterations = int(maxIterations)
-    param.stopLoss = float(stopLoss)
-    param.decayExponent = float(decayExponent)
-    param.decayT0 = float(decayT0)
-    param.learningMode = lm
-    learner = learnerCls(dataset, param)
-    return learner
-
-
-def rws(dataset,eps=1e-5, maxIterations=10000, stopLoss=0.0, learningRate=1.0, C=100.0, sigma=1.0, p=10):
-
-    assert dataset.__class__.lossType == 'flexible'
-    learnerCls = Rws_FlexibleLoss
-    learnerParamCls = Rws_FlexibleLossParameter
-
-
-    param = learnerParamCls()
-    param.eps = float(eps)
-    param.maxIterations = int(maxIterations)
-    param.stopLoss = float(stopLoss)
-    param.learningRate = float(learningRate)
-    param.C = float(C)
-    param.p = int(p)
-    param.sigma = float(sigma)
-    learner = learnerCls(dataset, param)
-    return learner
-
-
-
-def subgradientSSVM(dataset, learningMode='batch',eps=1e-5, maxIterations=10000, stopLoss=0.0, learningRate=1.0, C=100.0, averaging=-1, nConf=0):
-
-    assert dataset.__class__.lossType == 'flexible'
-    learnerCls = SubgradientSSVM_FlexibleLoss
-    learnerParamCls = SubgradientSSVM_FlexibleLossParameter
-    learningModeEnum = SubgradientSSVM_FlexibleLossParameter_LearningMode
-
-    lm = None
-    if learningMode not in ['online','batch']:
-        raise RuntimeError("wrong learning mode, must be 'online' or 'batch' ")
-
-    if learningMode == 'online':
-        lm = learningModeEnum.online
-    if learningMode == 'batch':
-        lm = learningModeEnum.batch
-    param = learnerParamCls()
-    param.eps = float(eps)
-    param.maxIterations = int(maxIterations)
-    param.stopLoss = float(stopLoss)
-    param.learningRate = float(learningRate)
-    param.C = float(C)
-    param.learningMode = lm
-    param.averaging = int(averaging)
-    param.nConf = int(nConf)
-    learner = learnerCls(dataset, param)
-    return learner
-
-def structMaxMarginLearner(dataset, regularizerWeight=1.0, minEps=1e-5, nSteps=0, epsStrategy='change', optimizer='bundle'):
-
-    if opengmConfig.withCplex or opengmConfig.withGurobi :
-        if optimizer != 'bundle':
-            raise RuntimeError("Optimizer type must be 'bundle' for now!")
-
-
-        assert dataset.__class__.lossType == 'flexible'
-        learnerCls = StructMaxMargin_Bundle_FlexibleLoss
-        learnerParamCls = StructMaxMargin_Bundle_FlexibleLossParameter
-
-        epsFromGap = False
-        if epsStrategy == 'gap':
-            epsFromGap = True
-        elif epsStrategy == 'change':
-            epsFromGap = False
-
-        param = learnerParamCls(regularizerWeight, minEps, nSteps, epsFromGap)
-        learner = learnerCls(dataset, param)
-        
-        return learner
-    else:
-        raise RuntimeError("this learner needs withCplex or withGurobi")
-
-
-def maxLikelihoodLearner(
-        dataset, 
-        maximumNumberOfIterations = 100,
-        gradientStepSize = 0.1,
-        weightStoppingCriteria = 0.00000001,
-        gradientStoppingCriteria = 0.00000000001,
-        infoFlag = True,
-        infoEveryStep = False,
-        weightRegularizer = 1.0,
-        beliefPropagationMaximumNumberOfIterations = 40,
-        beliefPropagationConvergenceBound = 0.0001,
-        beliefPropagationDamping = 0.5,
-        beliefPropagationReg = 1.0,
-        beliefPropagationTemperature = 1.0,
-        beliefPropagationIsAcyclic = Tribool(0)
-):
-
-    learnerCls = MaxLikelihood_FlexibleLoss
-    learnerParamCls = MaxLikelihood_FlexibleLossParameter
-
-    param = learnerParamCls(
-        maximumNumberOfIterations,
-        gradientStepSize,
-        weightStoppingCriteria,
-        gradientStoppingCriteria,
-        infoFlag,
-        infoEveryStep,
-        weightRegularizer,
-        beliefPropagationMaximumNumberOfIterations,
-        beliefPropagationConvergenceBound,
-        beliefPropagationDamping,
-        beliefPropagationTemperature,
-        beliefPropagationIsAcyclic
-    )
-    #param.maxIterations = int(maxIterations)
-    #param.reg = float(reg)
-    #param.temperature = float(temp)
-
-    learner = learnerCls(dataset, param)
-        
-    return learner
-
-
-
-
-
-def lUnaryFunction(weights, numberOfLabels, features, weightIds):
-
-    assert numberOfLabels >= 2
-    features = numpy.require(features, dtype=value_type)
-    weightIds = numpy.require(weightIds, dtype=index_type)
-
-    assert features.ndim == weightIds.ndim
-    if features.ndim == 1 or weightIds.ndim == 1:
-        assert numberOfLabels == 2
-        assert features.shape[0]  == weightIds.shape[0]
-        features  = features.reshape(1,-1)
-        weightIds = weightIds.reshape(1,-1)
-
-    assert features.shape[0] in [numberOfLabels, numberOfLabels-1]
-    assert weightIds.shape[0] in [numberOfLabels, numberOfLabels-1]
-    assert features.shape[1]  == weightIds.shape[1]
-
-
-    return LUnaryFunction(weights=weights, numberOfLabels=int(numberOfLabels), 
-                          features=features, weightIds=weightIds)
-
-
-
-
-class FeaturePolicy(object):
-    sharedBetweenLabels = 0
-
-def lUnaryFunctions(weights,numberOfLabels, features, weightIds,
-                    featurePolicy = FeaturePolicy.sharedBetweenLabels, 
-                    **kwargs):
-
-    if (featurePolicy == FeaturePolicy.sharedBetweenLabels ):
-
-        makeFirstEntryConst = kwargs.get('makeFirstEntryConst',False)
-        addConstFeature = kwargs.get('addConstFeature',False)
-
-
-        ff = numpy.require(features, dtype=value_type)
-        wid = numpy.require(weightIds, dtype=index_type)
-
-        assert features.ndim == 2
-        assert weightIds.ndim == 2
-
-
-        res = _lunarySharedFeatFunctionsGen(
-            weights = weights,
-            numFunctions = int(ff.shape[0]),
-            numLabels = int(numberOfLabels),
-            features = ff,
-            weightIds = wid,
-            makeFirstEntryConst = bool(makeFirstEntryConst),
-            addConstFeature = bool(addConstFeature)
-        )
-
-        res.__dict__['_features_'] =features
-        res.__dict__['_ff_'] = ff
-        res.__dict__['_weights_'] =  weights
-
-        return res
-    else :
-        raise RuntimeError("noy yet implemented")
-
-def lPottsFunctions(weights, numberOfLabels, features, weightIds,
-                    addConstFeature = False):
-
-    # check that features has the correct shape
-    if features.ndim != 2:
-        raise RuntimeError("feature must be two-dimensional")
-
-    # check that weights has the correct shape
-    if weightIds.ndim != 1:
-        raise RuntimeError("weightIds must be one-dimensional")
-    if weightIds.shape[0] != features.shape[1] + int(addConstFeature) :
-        raise RuntimeError("weightIds.shape[0]  must be equal to features.shape[1]")
-
-
-
-    # do the c++ call here
-    # which generates a function generator
-
-
-    ff = numpy.require(features, dtype=value_type)
-    wid = numpy.require(weightIds, dtype=index_type)
-    res =  _lpottsFunctionsGen(
-        weights=weights,
-        numFunctions=long(features.shape[0]),
-        numLabels=long(numberOfLabels),
-        features=ff,
-        weightIds=wid,
-        addConstFeature=bool(addConstFeature)
-    )
-
-    res.__dict__['_features_'] = ff
-    res.__dict__['_weights_'] = weights
-    return res
-
-
-
-
-
-
-
-# def getPbar(size, name):
-#     widgets = ['%s: '%name, Percentage(), ' ', Bar(marker='0',left='[',right=']'),
-#                ' ', ETA(), ' ', FileTransferSpeed()] #see docs for other options
-#     pbar = ProgressBar(widgets=widgets, maxval=size)
-#     return pbar
-
-def secondOrderImageDataset(imgs, gts, numberOfLabels, fUnary, fBinary, addConstFeature, trainFraction=0.75):
-    #try:
-    #    import vigra
-    #    from progressbar import *
-    #except:
-    #    pass
-
-    # train test
-    nImg = len(imgs)
-    nTrain = int(float(nImg)*trainFraction+0.5)
-    nTest = (nImg-nTrain)
-    
-    def getFeat(fComp, im):
-        res = []
-        for f in fComp:
-            r = f(im)
-            if r.ndim == 2:
-                r = r[:,:, None]
-            res.append(r)
-        return res
-
-    # compute features for a single image
-    tImg = imgs[0]
-    unaryFeat = getFeat(fUnary, tImg)
-    unaryFeat = numpy.nan_to_num(numpy.concatenate(unaryFeat,axis=2).view(numpy.ndarray))
-    nUnaryFeat = unaryFeat.shape[-1] + int(addConstFeature)
-    nUnaryFeat *= numberOfLabels - int(numberOfLabels==2)
-
-    if len(fBinary)>0:
-        binaryFeat = getFeat(fBinary, tImg)
-        binaryFeat = numpy.nan_to_num(numpy.concatenate(binaryFeat,axis=2).view(numpy.ndarray))
-        nBinaryFeat = binaryFeat.shape[-1] + int(addConstFeature)
-        nWeights  = nUnaryFeat + nBinaryFeat
-    else:
-        nBinaryFeat = 0
-    print "------------------------------------------------"
-    print "nTrain",nTrain,"nTest",nTest
-    print "nWeights",nWeights,"(",nUnaryFeat,nBinaryFeat,")"
-    print "------------------------------------------------"
-
-    train_set = []
-    tentative_test_set = []
-
-    for i,(img,gt) in enumerate(zip(imgs,gts)):
-        if(i<nTrain):
-            train_set.append((img,gt))
-        else:
-            tentative_test_set.append((img,gt))
-
-
-    dataset = createDataset(numWeights=nWeights)
-    weights = dataset.getWeights()
-    uWeightIds = numpy.arange(nUnaryFeat ,dtype='uint64')
-    if numberOfLabels != 2:
-        uWeightIds = uWeightIds.reshape([numberOfLabels,-1])
-    else:
-        uWeightIds = uWeightIds.reshape([1,-1])
-    bWeightIds = numpy.arange(start=nUnaryFeat,stop=nWeights,dtype='uint64')
-
-    def makeModel(img,gt):
-        shape = gt.shape[0:2]
-        numVar = shape[0] * shape[1]
-
-        # make model
-        gm = graphicalModel(numpy.ones(numVar)*numberOfLabels)
-
-
-
-
-        # compute features
-        unaryFeat = getFeat(fUnary, img)
-        unaryFeat = numpy.nan_to_num(numpy.concatenate(unaryFeat,axis=2).view(numpy.ndarray))
-        unaryFeat  = unaryFeat.reshape([numVar,-1])
-        
-
-
-
-        # add unaries
-        lUnaries = lUnaryFunctions(weights =weights,numberOfLabels = numberOfLabels, 
-                                    features=unaryFeat, weightIds = uWeightIds,
-                                    featurePolicy= FeaturePolicy.sharedBetweenLabels,
-                                    makeFirstEntryConst=numberOfLabels==2, addConstFeature=addConstFeature)
-        fids = gm.addFunctions(lUnaries)
-        gm.addFactors(fids, numpy.arange(numVar))
-
-
-        if len(fBinary)>0:
-            binaryFeat = getFeat(fBinary, img)
-            binaryFeat = numpy.nan_to_num(numpy.concatenate(binaryFeat,axis=2).view(numpy.ndarray))
-            binaryFeat  = binaryFeat.reshape([numVar,-1])
-
-            # add second order
-            vis2Order=gridVis(shape[0:2],True)
-
-            fU = binaryFeat[vis2Order[:,0],:]
-            fV = binaryFeat[vis2Order[:,1],:]
-            fB  = (fU + fV) / 2.0
-            
-            lp = lPottsFunctions(weights=weights, numberOfLabels=numberOfLabels,
-                                          features=fB, weightIds=bWeightIds,
-                                          addConstFeature=addConstFeature)
-            gm.addFactors(gm.addFunctions(lp), vis2Order) 
-
-        return gm
-
-    # make training models
-    # pbar = getPbar(nTrain,"Training Models")
-    # pbar.start()
-    for i,(img,gt) in enumerate(train_set):
-        gm = makeModel(img, gt)
-        dataset.pushBackInstance(gm,gt.reshape(-1).astype(label_type))
-        # pbar.update(i)
-    # pbar.finish()
-
-
-    # make test models
-    test_set = []
-    # pbar = getPbar(nTest,"Test Models")
-    # pbar.start()
-    for i,(img,gt) in enumerate(tentative_test_set):
-        gm = makeModel(img, gt)
-        test_set.append((img, gt, gm))
-    #     pbar.update(i)
-    # pbar.finish()
-
-    return dataset, test_set
-
-
-
-def superpixelDataset(imgs,sps, gts, numberOfLabels, fUnary, fBinary, addConstFeature, trainFraction=0.75):
-    try:
-        import vigra
-    except:
-        raise ImportError("cannot import vigra which is needed for superpixelDataset")
-
-    # train test
-    nImg = len(imgs)
-    nTrain = int(float(nImg)*trainFraction+0.5)
-    nTest = (nImg-nTrain)
-    
-    def getFeat(fComp, im, topoShape=False):
-        res = []
-        if(topoShape):
-            shape = im.shape[0:2]
-            tshape = [2*s-1 for s in shape]
-            iiimg = vigra.sampling.resize(im, tshape)
-        else:
-            iiimg = im
-        for f in fComp:
-            r = f(iiimg)
-            if r.ndim == 2:
-                r = r[:,:, None]
-            res.append(r)
-        return res
-
-    # compute features for a single image
-    tImg = imgs[0]
-    unaryFeat = getFeat(fUnary, tImg)
-    unaryFeat = numpy.nan_to_num(numpy.concatenate(unaryFeat,axis=2).view(numpy.ndarray))
-    nUnaryFeat = unaryFeat.shape[-1] + int(addConstFeature)
-    nUnaryFeat *= numberOfLabels - int(numberOfLabels==2)
-    if len(fBinary)>0:
-        binaryFeat = getFeat(fBinary, tImg)
-        binaryFeat = numpy.nan_to_num(numpy.concatenate(binaryFeat,axis=2).view(numpy.ndarray))
-        nBinaryFeat = binaryFeat.shape[-1] + int(addConstFeature)
-    else:
-        nBinaryFeat = 0
-
-    nWeights  = nUnaryFeat + nBinaryFeat
-
-    print "------------------------------------------------"
-    print "nTrain",nTrain,"nTest",nTest
-    print "nWeights",nWeights,"(",nUnaryFeat,nBinaryFeat,")"
-    print "------------------------------------------------"
-
-    train_set = []
-    tentative_test_set = []
-
-    for i,(img,sp,gt) in enumerate(zip(imgs,sps,gts)):
-        if(i<nTrain):
-            train_set.append((img,sp,gt))
-        else:
-            tentative_test_set.append((img,sp,gt))
-
-
-    dataset = createDataset(numWeights=nWeights)
-    weights = dataset.getWeights()
-    uWeightIds = numpy.arange(nUnaryFeat ,dtype='uint64')
-    if numberOfLabels != 2:
-        uWeightIds = uWeightIds.reshape([numberOfLabels,-1])
-    else:
-        uWeightIds = uWeightIds.reshape([1,-1])
-
-    if len(fBinary)>0:
-        bWeightIds = numpy.arange(start=nUnaryFeat,stop=nWeights,dtype='uint64')
-
-
-
-
-
-    def makeModel(img,sp,gt):
-        assert sp.min() == 0
-        shape = img.shape[0:2]
-        gg = vigra.graphs.gridGraph(shape)
-        rag = vigra.graphs.regionAdjacencyGraph(gg,sp)
-        numVar = rag.nodeNum
-        assert rag.nodeNum == rag.maxNodeId +1
-
-        # make model
-        gm = graphicalModel(numpy.ones(numVar)*numberOfLabels)
-
-        assert gm.numberOfVariables == rag.nodeNum 
-        assert gm.numberOfVariables == rag.maxNodeId +1
-
-        # compute features
-        unaryFeat = getFeat(fUnary, img)
-        unaryFeat = numpy.nan_to_num(numpy.concatenate(unaryFeat,axis=2).view(numpy.ndarray)).astype('float32')
-        unaryFeat = vigra.taggedView(unaryFeat,'xyc')
-        accList = []
-
-        #for c in range(unaryFeat.shape[-1]):
-        #    cUnaryFeat = unaryFeat[:,:,c]
-        #    cAccFeat = rag.accumulateNodeFeatures(cUnaryFeat)[:,None]
-        #    accList.append(cAccFeat)
-        #accUnaryFeat = numpy.concatenate(accList,axis=1)
-        accUnaryFeat = rag.accumulateNodeFeatures(unaryFeat)#[:,None]
-
-
-        #print accUnaryFeat.shape
-
-        #accUnaryFeat = rag.accumulateNodeFeatures(unaryFeat[:,:,:])
-        #accUnaryFeat = vigra.taggedView(accUnaryFeat,'nc')
-        #accUnaryFeat = accUnaryFeat[1:accUnaryFeat.shape[0],:]
-
-      
-
-
-
-        #binaryFeat  = binaryFeat.reshape([numVar,-1])
-
-
-
-        # add unaries
-        lUnaries = lUnaryFunctions(weights =weights,numberOfLabels = numberOfLabels, 
-                                            features=accUnaryFeat, weightIds = uWeightIds,
-                                            featurePolicy= FeaturePolicy.sharedBetweenLabels,
-                                            makeFirstEntryConst=numberOfLabels==2, addConstFeature=addConstFeature)
-        fids = gm.addFunctions(lUnaries)
-        gm.addFactors(fids, numpy.arange(numVar))
-
-        
-        if len(fBinary)>0:
-            binaryFeat = getFeat(fBinary, img, topoShape=False)
-            binaryFeat = numpy.nan_to_num(numpy.concatenate(binaryFeat,axis=2).view(numpy.ndarray)).astype('float32')
-            edgeFeat = vigra.graphs.edgeFeaturesFromImage(gg, binaryFeat)
-            accBinaryFeat = rag.accumulateEdgeFeatures(edgeFeat)
-
-            uvIds =  numpy.sort(rag.uvIds(), axis=1)
-            assert uvIds.min()==0
-            assert uvIds.max()==gm.numberOfVariables-1
-
-
-
-        
-            lp = lPottsFunctions(weights=weights, numberOfLabels=numberOfLabels,
-                                          features=accBinaryFeat, weightIds=bWeightIds,
-                                          addConstFeature=addConstFeature)
-            fids = gm.addFunctions(lp)
-            gm.addFactors(fids, uvIds) 
-
-        return gm
-
-    # make training models
-    # pbar = getPbar(nTrain,"Training Models")
-    # pbar.start()
-    for i,(img,sp,gt) in enumerate(train_set):
-        gm = makeModel(img,sp, gt)
-        dataset.pushBackInstance(gm,gt.astype(label_type))
-        # pbar.update(i)
-    # pbar.finish()
-
-
-    # make test models
-    test_set = []
-    # pbar = getPbar(nTest,"Test Models")
-    # pbar.start()
-    for i,(img,sp,gt) in enumerate(tentative_test_set):
-        gm = makeModel(img,sp, gt)
-        test_set.append((img, sp, gm))
-    #     pbar.update(i)
-    # pbar.finish()
-
-    return dataset, test_set
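
For context, this is how the module removed above was driven; the snippet
uses only functions defined in that file and no longer works after this
revert:

    from opengm import learning

    # one dataset, three shared weights, all initialised to zero
    dataset = learning.createDataset(numWeights=3)
    # ... build models with lUnaryFunctions / lPottsFunctions and push
    # ... each instance via dataset.pushBackInstance(gm, groundTruth) ...

    learner = learning.gridSearchLearner(dataset,
                                         lowerBounds=[0.0, 0.0, 0.0],
                                         upperBounds=[1.0, 1.0, 1.0],
                                         nTestPoints=[5, 5, 5])
    # learner.learn(infCls=opengm.inference.Icm, parameter=opengm.InfParam())
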
diff --git a/src/interfaces/python/opengm/learning/helper.hxx b/src/interfaces/python/opengm/learning/helper.hxx
deleted file mode 100644
index e95034a..0000000
--- a/src/interfaces/python/opengm/learning/helper.hxx
+++ /dev/null
@@ -1,330 +0,0 @@
-#ifndef HELPER_HXX
-#define HELPER_HXX
-
-#include <boost/python.hpp>
-#include <boost/python/module.hpp>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-#include <opengm/python/numpyview.hxx>
-
-#include <opengm/inference/icm.hxx>
-#include <opengm/inference/lazyflipper.hxx>
-#include <opengm/inference/self_fusion.hxx>
-#include <opengm/learning/gridsearch-learning.hxx>
-#include <opengm/inference/messagepassing/messagepassing.hxx>
-
-#ifdef WITH_CPLEX
-#include <opengm/inference/lpcplex.hxx>
-#include <opengm/inference/multicut.hxx>
-#endif
-
-#ifdef WITH_QPBO
-#include <opengm/inference/external/qpbo.hxx>
-#include <opengm/inference/reducedinference.hxx>
-#endif
-
-#ifdef WITH_TRWS
-#include <opengm/inference/external/trws.hxx>
-#endif
-
-
-namespace opengm{
-
-template<class LEARNER>
-class LearnerInferenceSuite: public boost::python::def_visitor<LearnerInferenceSuite<LEARNER> >{
-public:
-    friend class boost::python::def_visitor_access;
-
-    LearnerInferenceSuite(){
-
-    }
-
-    template<class INF>
-    static void pyLearn_Inf(LEARNER & learner, const typename INF::Parameter & param)
-    {
-        learner. template learn<INF>(param);
-    }
-
-    #ifdef WITH_QPBO
-    template<class INF>
-    static void pyLearn_ReducedInf(
-        LEARNER & learner, 
-        const typename INF::Parameter & param,
-        const bool persistency,
-        const bool tentacles,
-        const bool connectedComponents
-    )
-    {
-
-        typedef typename INF::GraphicalModelType GmType;
-        typedef typename opengm::ReducedInferenceHelper<GmType>::InfGmType RedInfGm;
-
-        // rebind the inference to the RedInfGm
-        typedef typename INF:: template RebindGm<RedInfGm>::type RedInfRebindInf;
-
-
-        typedef typename RedInfRebindInf::Parameter RedInfRebindInfParam;
-        typedef opengm::ReducedInference<GmType, opengm::Minimizer, RedInfRebindInf> RedInf;
-        typedef typename RedInf::Parameter RedInfParam;
-
-        RedInfRebindInfParam redInfRebindInfParam(param);
-
-        RedInfParam redInfPara;
-        redInfPara.subParameter_ = redInfRebindInfParam;
-        redInfPara.Persistency_ = persistency;
-        redInfPara.Tentacle_ = tentacles;
-        redInfPara.ConnectedComponents_ = connectedComponents;
-
-        learner. template learn<RedInf>(redInfPara);
-    }
-    #endif
-
-
-    #ifdef WITH_QPBO
-    template<class INF>
-    static void pyLearn_ReducedInfSelfFusion(
-        LEARNER & learner, 
-        const typename INF::Parameter & param,
-        const bool persistency,
-        const bool tentacles,
-        const bool connectedComponents
-    )
-    {
-
-        typedef typename INF::GraphicalModelType GmType;
-        typedef typename opengm::ReducedInferenceHelper<GmType>::InfGmType RedInfGm;
-
-        // rebind the inference to the RedInfGm
-        typedef typename INF:: template RebindGm<RedInfGm>::type RedInfRebindInf;
-
-
-        typedef typename RedInfRebindInf::Parameter RedInfRebindInfParam;
-        typedef opengm::ReducedInference<GmType, opengm::Minimizer, RedInfRebindInf> RedInf;
-        typedef typename RedInf::Parameter RedInfParam;
-
-        RedInfRebindInfParam redInfRebindInfParam(param);
-
-        RedInfParam redInfPara;
-        redInfPara.subParameter_ = redInfRebindInfParam;
-        redInfPara.Persistency_ = persistency;
-        redInfPara.Tentacle_ = tentacles;
-        redInfPara.ConnectedComponents_ = connectedComponents;
-
-
-        typedef opengm::SelfFusion<RedInf> SelfFusionInf;
-        typedef typename SelfFusionInf::Parameter SelfFusionInfParam;
-        SelfFusionInfParam sfParam;
-
-        sfParam.infParam_ = redInfPara;
-        sfParam.fuseNth_ = 10;
-        sfParam.maxSubgraphSize_ = 2;
-        sfParam.reducedInf_ = true;
-        sfParam.tentacles_ = false;
-        sfParam.connectedComponents_ = true;
-        sfParam.fusionTimeLimit_ = 100.0;
-        sfParam.numStopIt_ = 10;
-        sfParam.fusionSolver_ = SelfFusionInf::QpboFusion;
-
-        learner. template learn<SelfFusionInf>(sfParam);
-    }
-    #endif
-
-
-    template<class INF>
-    static void pyLearn_SelfFusion(
-        LEARNER & learner, 
-        const typename INF::Parameter & param,
-        const size_t fuseNth,
-        const std::string & fusionSolver,
-        const UInt64Type maxSubgraphSize,
-        const bool reducedInf,
-        const bool connectedComponents,
-        const double fusionTimeLimit,
-        const size_t numStopIt
-    )
-    {
-
-        typedef typename INF::GraphicalModelType GmType;
-        
-        typedef opengm::SelfFusion<INF> SelfFusionInf;
-        typedef typename SelfFusionInf::Parameter SelfFusionInfParam;
-
-
-        SelfFusionInfParam sfParam;
-
-        if(fusionSolver == "qpbo"){
-            sfParam.fusionSolver_ = SelfFusionInf::QpboFusion;
-        }
-        else if(fusionSolver == "cplex"){
-            sfParam.fusionSolver_ = SelfFusionInf::CplexFusion;
-        }
-        else if(fusionSolver == "lf"){
-            sfParam.fusionSolver_ = SelfFusionInf::LazyFlipperFusion;
-        }
-        else{
-            throw RuntimeError(std::string("unknown fusion solver: ") + fusionSolver);
-        }
-
-        sfParam.infParam_ = param;
-        sfParam.fuseNth_ = fuseNth;
-        sfParam.maxSubgraphSize_ = maxSubgraphSize;
-        sfParam.reducedInf_ = reducedInf;
-        sfParam.tentacles_ = false;
-        sfParam.connectedComponents_ = connectedComponents;
-        sfParam.fusionTimeLimit_ = fusionTimeLimit;
-        sfParam.numStopIt_ = numStopIt;
-
-        learner. template learn<SelfFusionInf>(sfParam);
-    }
-
-    template <class classT>
-    void visit(classT& c) const{
-        // SOME INFERENCE METHODS
-        typedef typename LEARNER::GMType GMType;
-        typedef typename LEARNER::Parameter PyLearnerParam;
-        typedef typename LEARNER::DatasetType DatasetType;
-        typedef opengm::Minimizer ACC;
-
-        typedef opengm::ICM<GMType, ACC> IcmInf;
-        typedef opengm::LazyFlipper<GMType, ACC> LazyFlipperInf;
-        typedef opengm::BeliefPropagationUpdateRules<GMType, ACC> UpdateRulesType;
-        typedef opengm::MessagePassing<GMType, ACC, UpdateRulesType, opengm::MaxDistance> BpInf;
-
-        #ifdef WITH_CPLEX
-            typedef opengm::LPCplex<GMType, ACC> Cplex;
-            typedef opengm::Multicut<GMType, ACC> Multicut;
-        #endif
-
-        #ifdef WITH_QPBO
-            typedef opengm::external::QPBO<GMType>  QpboExternal;
-        #endif
-
-        #ifdef WITH_TRWS
-            typedef opengm::external::TRWS<GMType>  TrwsExternal;
-        #endif
-
-        c
-            //.def("_learn",&pyLearn_Inf<IcmInf>)
-            //.def("_learn",&pyLearn_Inf<LazyFlipperInf>)
-            //.def("_learn",&pyLearn_Inf<BpInf>)
-            #ifdef WITH_CPLEX
-            //.def("_learn",&pyLearn_Inf<Cplex>) 
-            .def("_learn",&pyLearn_Inf<Multicut>)
-            #endif
-            #ifdef WITH_QPBO
-            .def("_learn",&pyLearn_Inf<QpboExternal>)
-            #endif
-            #ifdef WITH_TRWS
-            .def("_learn",&pyLearn_Inf<TrwsExternal>)
-            #endif
-
-            #if 0
-            // REDUCED INFERENCE
-            #ifdef WITH_QPBO
-                .def("_learnReducedInf",&pyLearn_ReducedInf<LazyFlipperInf>)
-                #ifdef WITH_TRWS
-                .def("_learnReducedInf",&pyLearn_ReducedInf<TrwsExternal>)
-                #endif
-                #ifdef WITH_CPLEX
-                .def("_learnReducedInf",&pyLearn_ReducedInf<Cplex>)
-                #endif
-            #endif
-
-            // SELF FUSION
-            #ifdef WITH_TRWS
-            .def("_learnSelfFusion",&pyLearn_SelfFusion<TrwsExternal>)
-            #endif
-
-            // REDUCED INFERENCE SELF FUSION
-            #if defined(WITH_TRWS) && defined(WITH_QPBO)
-            .def("_learnReducedInfSelfFusion",&pyLearn_ReducedInfSelfFusion<TrwsExternal>)
-            #endif
-            #endif
-        ;
-    }
-};
-
-
-
-template<class DS>
-class DatasetInferenceSuite: public boost::python::def_visitor<DatasetInferenceSuite<DS> >{
-public:
-   friend class boost::python::def_visitor_access;
-
-   DatasetInferenceSuite(){
-
-   }
-
-   template<class INF>
-   static typename DS::ValueType pyGetLossWithInf(DS & ds, const typename INF::Parameter & param, const size_t i)
-   {
-       return ds. template getLoss<INF>(param, i);
-   }
-
-   template<class INF>
-   static typename DS::ValueType pyGetTotalLossWithInf(DS & ds, const typename INF::Parameter & param)
-   {
-       return ds. template getTotalLoss<INF>(param);
-   }
-
-   template <class classT>
-   void visit(classT& c) const{
-       // SOME INFERENCE METHODS
-       typedef typename DS::GMType GMType;
-       typedef opengm::Minimizer ACC;
-
-       typedef opengm::ICM<GMType, ACC> IcmInf;
-       typedef opengm::LazyFlipper<GMType, ACC> LazyFlipperInf;
-       typedef opengm::BeliefPropagationUpdateRules<GMType, ACC> UpdateRulesType;
-       typedef opengm::MessagePassing<GMType, ACC, UpdateRulesType, opengm::MaxDistance> BpInf;
-
-#ifdef WITH_CPLEX
-       typedef opengm::LPCplex<GMType, ACC> Cplex;
-       typedef opengm::Multicut<GMType, ACC> Multicut;
-#endif
-#ifdef WITH_QPBO
-       typedef opengm::external::QPBO<GMType>  QpboExternal;
-#endif
-#ifdef WITH_TRWS
-       typedef opengm::external::TRWS<GMType>  TrwsExternal;
-#endif
-
-      c
-          .def("_getLoss",&pyGetLossWithInf<IcmInf>)
-          .def("_getTotalLoss",&pyGetTotalLossWithInf<IcmInf>)
-          .def("_getLoss",&pyGetLossWithInf<LazyFlipperInf>)
-          .def("_getTotalLoss",&pyGetTotalLossWithInf<LazyFlipperInf>)
-          .def("_getLoss",&pyGetLossWithInf<BpInf>)
-          .def("_getTotalLoss",&pyGetTotalLossWithInf<BpInf>)
-#ifdef WITH_CPLEX
-          .def("_getLoss",&pyGetLossWithInf<Cplex>)
-          .def("_getTotalLoss",&pyGetTotalLossWithInf<Cplex>)
-          .def("_getLoss",&pyGetLossWithInf<Multicut>)
-          .def("_getTotalLoss",&pyGetTotalLossWithInf<Multicut>)
-#endif
-#ifdef WITH_QPBO
-          .def("_getLoss",&pyGetLossWithInf<QpboExternal>)
-          .def("_getTotalLoss",&pyGetTotalLossWithInf<QpboExternal>)
-#endif
-#ifdef WITH_TRWS
-          .def("_getLoss",&pyGetLossWithInf<TrwsExternal>)
-          .def("_getTotalLoss",&pyGetTotalLossWithInf<TrwsExternal>)
-#endif
-      ;
-   }
-};
-
-
-
-} // namespace opengm
-
-#endif // HELPER_HXX
-
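
For context, the two visitors above were the glue between the C++ learners and
Python: LearnerInferenceSuite injected the _learn overloads into every learner
class, and DatasetInferenceSuite injected _getLoss/_getTotalLoss into the
dataset classes. A rough sketch of how they were driven from Python (the
trwsParam object and the thin opengm.learning wrapper are assumed, not shown
in this diff):

    from opengm import learning

    # per-instance and aggregate loss under a chosen inference back end
    # (dispatches to pyGetLossWithInf<INF> / pyGetTotalLossWithInf<INF>)
    loss0 = dataset._getLoss(trwsParam, 0)
    total = dataset._getTotalLoss(trwsParam)

    # run learning with the same back end (dispatches to pyLearn_Inf<INF>)
    learner._learn(trwsParam)
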
diff --git a/src/interfaces/python/opengm/learning/learning.cxx b/src/interfaces/python/opengm/learning/learning.cxx
deleted file mode 100644
index 195b2ac..0000000
--- a/src/interfaces/python/opengm/learning/learning.cxx
+++ /dev/null
@@ -1,91 +0,0 @@
-#include <boost/python.hpp>
-#include <stddef.h>
-#include <boost/python/suite/indexing/vector_indexing_suite.hpp>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-#include <opengm/python/numpyview.hxx>
-#include <opengm/python/pythonfunction.hxx>
-
-#include <opengm/learning/loss/hammingloss.hxx>
-#include <opengm/learning/loss/generalized-hammingloss.hxx>
-#include <opengm/learning/loss/noloss.hxx>
-#include <opengm/learning/loss/flexibleloss.hxx>
-
-#if defined(WITH_CPLEX) || defined(WITH_GUROBI)
-#include <opengm/learning/bundle-optimizer.hxx>
-#endif
-
-
-namespace bp = boost::python;
-namespace op = opengm::python;
-namespace ol = opengm::learning;
-
-namespace opengm{
-
-    void export_weights();
-    void export_weight_constraints();
-
-    template<class GM, class LOSS>
-    void export_dataset(const std::string& className);
-
-    template<class GM>
-    void export_loss();
-
-    template<class DATASET>
-    void export_grid_search_learner(const std::string & clsName);
-
-    template<class DATASET, class OPTIMIZER>
-    void export_struct_max_margin_bundle_learner(const std::string & clsName);
-
-    //template<class DATASET>
-    //void export_max_likelihood_learner(const std::string & clsName);
-
-    template<class DATASET>
-    void export_struct_perceptron_learner(const std::string & clsName);
-
-    template<class DATASET>
-    void export_subgradient_ssvm_learner(const std::string & clsName);
-
-    template<class DATASET>
-    void export_rws_learner(const std::string & clsName);
-
-    template<class GM_ADDER,class GM_MULT>  
-    void export_lfunction_generator();
-
-
-}
-
-
-
-BOOST_PYTHON_MODULE_INIT(_learning) {
-
-
-    Py_Initialize();
-    PyEval_InitThreads();
-    bp::numeric::array::set_module_and_type("numpy", "ndarray");
-    bp::docstring_options doc_options(true,true,false);
-
-
-    opengm::export_weights();
-    opengm::export_weight_constraints();
-    // function exporter
-    opengm::export_lfunction_generator<op::GmAdder,op::GmMultiplier>();
-
-    // export loss
-    opengm::export_loss<op::GmAdder>();
-
-    // templated datasets
-    opengm::export_dataset<op::GmAdder, ol::FlexibleLoss >("DatasetWithFlexibleLoss");
-
-
-
-    opengm::export_grid_search_learner<op::GmAdderFlexibleLossDataset>("GridSearch_FlexibleLoss");
-    opengm::export_struct_perceptron_learner<op::GmAdderFlexibleLossDataset>("StructPerceptron_FlexibleLoss");
-    opengm::export_subgradient_ssvm_learner<op::GmAdderFlexibleLossDataset>("SubgradientSSVM_FlexibleLoss");
-    //opengm::export_max_likelihood_learner<op::GmAdderFlexibleLossDataset>("MaxLikelihood_FlexibleLoss");
-    opengm::export_rws_learner<op::GmAdderFlexibleLossDataset>("Rws_FlexibleLoss");
-    
-    #if defined(WITH_CPLEX) || defined(WITH_GUROBI)
-        opengm::export_struct_max_margin_bundle_learner< op::GmAdderFlexibleLossDataset, ol::BundleOptimizer<op::GmValueType> >("StructMaxMargin_Bundle_FlexibleLoss");
-    #endif
-}
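
Taken together, the module init above defined the public surface of
opengm._learning. A minimal end-to-end sketch of the removed API, assuming the
usual thin re-export through opengm.learning (the class names are exactly the
strings passed to the export calls above):

    import numpy
    import opengm
    from opengm import learning

    weights = learning.Weights(numpy.zeros(3, dtype=opengm.value_type))
    dataset = learning.DatasetWithFlexibleLoss(0)       # from export_dataset()

    param = learning.GridSearch_FlexibleLossParameter(
        numpy.array([0.0, 0.0, 0.0]),                   # lower bounds
        numpy.array([1.0, 1.0, 1.0]),                   # upper bounds
        numpy.array([5, 5, 5], dtype=numpy.uint64))     # test points per weight
    learner = learning.GridSearch_FlexibleLoss(dataset, param)
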
diff --git a/src/interfaces/python/opengm/learning/pyDataset.cxx b/src/interfaces/python/opengm/learning/pyDataset.cxx
deleted file mode 100644
index 5d8068a..0000000
--- a/src/interfaces/python/opengm/learning/pyDataset.cxx
+++ /dev/null
@@ -1,104 +0,0 @@
-#include <boost/python.hpp>
-#include <boost/python/suite/indexing/vector_indexing_suite.hpp>
-#include <stdexcept>
-#include <stddef.h>
-
-#include <opengm/learning/dataset/editabledataset.hxx>
-#include <opengm/learning/dataset/dataset_io.hxx>
-#include <opengm/learning/loss/hammingloss.hxx>
-#include <opengm/learning/loss/generalized-hammingloss.hxx>
-#include <opengm/learning/loss/noloss.hxx>
-#include <opengm/python/numpyview.hxx>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-
-#define DefaultErrorFn DefaultErrorFn_TrwsExternal_DS
-#include "helper.hxx"
-
-using namespace boost::python;
-
-namespace opengm{
-
-template<class GM, class LOSS>
-void pySetInstanceWithLossParam(opengm::datasets::EditableDataset<GM, LOSS>& ds,
-                   const size_t i,
-                   const GM& gm,
-                   const opengm::python::NumpyView<typename GM::LabelType,1>  gt,
-                   const typename LOSS::Parameter & param) {
-    std::vector<typename GM::LabelType> gt_vector(gt.begin(), gt.end());
-    ds.setInstance(i, gm, gt_vector, param);
-}
-
-template<class GM, class LOSS>
-void pySetInstance(opengm::datasets::EditableDataset<GM, LOSS>& ds,
-                   const size_t i,
-                   const GM& gm,
-                   const opengm::python::NumpyView<typename GM::LabelType,1>& gt
-                   ) {
-    pySetInstanceWithLossParam(ds, i, gm, gt, typename LOSS::Parameter());
-}
-
-template<class GM, class LOSS>
-void pyPushBackInstanceWithLossParam(opengm::datasets::EditableDataset<GM,LOSS>& ds,
-                        const GM& gm,
-                        const opengm::python::NumpyView<typename GM::LabelType,1>& gt,
-                        const typename LOSS::Parameter & param) {
-    std::vector<typename GM::LabelType> gt_vector(gt.begin(), gt.end());
-    ds.pushBackInstance(gm, gt_vector, param);
-}
-
-template<class GM, class LOSS>
-void pyPushBackInstance(opengm::datasets::EditableDataset<GM,LOSS>& ds,
-                        const GM& gm,
-                        const opengm::python::NumpyView<typename GM::LabelType,1>& gt
-                        ) {
-    pyPushBackInstanceWithLossParam(ds, gm, gt, typename LOSS::Parameter());
-}
-
-template<class GM, class LOSS>
-void pySaveDataset(opengm::datasets::EditableDataset<GM,LOSS >& ds,
-                   const std::string datasetpath,
-                   const std::string prefix) {
-    opengm::datasets::DatasetSerialization::save(ds, datasetpath, prefix);
-}
-
-template<class GM, class LOSS>
-void pyLoadDataset(opengm::datasets::EditableDataset<GM,LOSS >& ds,
-                   const std::string datasetpath,
-                   const std::string prefix) {
-    opengm::datasets::DatasetSerialization::loadAll(datasetpath, prefix, ds);
-}
-
-template<class GM, class LOSS>
-void export_dataset(const std::string& className){
-    typedef opengm::datasets::EditableDataset<GM,LOSS > PyDataset;
-
-   class_<PyDataset > (className.c_str(),init<size_t>())
-           .def("lockModel", &PyDataset::lockModel)
-           .def("unlockModel", &PyDataset::unlockModel)
-           .def("getModel", &PyDataset::getModel, return_internal_reference<>())
-           .def("getModelWithLoss", &PyDataset::getModelWithLoss, return_internal_reference<>())
-           .def("getGT", &PyDataset::getGT, return_internal_reference<>())
-           .def("getWeights", &PyDataset::getWeights, return_internal_reference<>())
-           .def("getNumberOfWeights", &PyDataset::getNumberOfWeights)
-           .def("getNumberOfModels", &PyDataset::getNumberOfModels)
-           .def("setInstance", &pySetInstance<GM,LOSS>)
-           .def("setInstanceWithLossParam", &pySetInstanceWithLossParam<GM,LOSS>)
-           .def("setInstance", &pySetInstanceWithLossParam<GM,LOSS>)
-           .def("pushBackInstance", &pyPushBackInstance<GM,LOSS>)
-           .def("pushBackInstanceWithLossParam", &pyPushBackInstanceWithLossParam<GM,LOSS>)
-           .def("pushBackInstance", &pyPushBackInstanceWithLossParam<GM,LOSS>)
-           .def("setWeights", &PyDataset::setWeights)
-           .def("save", &pySaveDataset<GM, LOSS>)
-           .def("load", &pyLoadDataset<GM, LOSS>)
-           .def(DatasetInferenceSuite<PyDataset>())
-   ;
-
-}
-
-
-//template void export_dataset<opengm::python::GmAdder, opengm::learning::HammingLoss> (const std::string& className);
-//template void export_dataset<opengm::python::GmAdder, opengm::learning::NoLoss> (const std::string& className);
-template void export_dataset<opengm::python::GmAdder, opengm::learning::FlexibleLoss> (const std::string& className);
-
-}
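
The editable dataset wrapped above accepted ground truth directly as numpy
arrays, and the overload taking a loss parameter shared the same Python name.
A usage sketch (gm, gm2, gt2 and lossParam are assumed to exist):

    import numpy
    import opengm
    from opengm import learning

    ds = learning.DatasetWithFlexibleLoss(1)            # one empty instance
    gt = numpy.zeros(gm.numberOfVariables, dtype=opengm.label_type)
    ds.setInstance(0, gm, gt)                           # default loss parameter
    ds.pushBackInstance(gm2, gt2, lossParam)            # explicit loss parameter
    ds.save("/tmp/dataset", "example_")                 # DatasetSerialization::save
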
diff --git a/src/interfaces/python/opengm/learning/pyGridSearchLearner.cxx b/src/interfaces/python/opengm/learning/pyGridSearchLearner.cxx
deleted file mode 100644
index 412117c..0000000
--- a/src/interfaces/python/opengm/learning/pyGridSearchLearner.cxx
+++ /dev/null
@@ -1,64 +0,0 @@
-#include <boost/python.hpp>
-#include <boost/python/module.hpp>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-#include <opengm/python/numpyview.hxx>
-
-#define DefaultErrorFn DefaultErrorFn_TrwsExternalB
-#include "helper.hxx"
-
-namespace bp = boost::python;
-namespace op = opengm::python;
-namespace ol = opengm::learning;
-
-namespace opengm{
-
-
-    template<class PARAM>
-    PARAM * pyGridSearchParamConstructor(
-        op::NumpyView<double> lowerBound,
-        op::NumpyView<double> upperBound,
-        op::NumpyView<size_t> nTestPoints
-    ){
-        PARAM * p  = new PARAM();
-        p->parameterLowerbound_.assign(lowerBound.begin(), lowerBound.end());
-        p->parameterUpperbound_.assign(upperBound.begin(), upperBound.end());
-        p->testingPoints_.assign(nTestPoints.begin(), nTestPoints.end());
-        return p;
-    }
-
-    template<class L >
-    L * pyGridSearchConstructor(
-        typename L::DatasetType & dataset,
-        const typename L::Parameter & param
-    ){
-        L * l  = new L(dataset, param);
-        return l;
-    }
-
-    template<class DATASET>
-    void export_grid_search_learner(const std::string & clsName){
-        typedef learning::GridSearchLearner<DATASET> PyLearner;
-        typedef typename PyLearner::Parameter PyLearnerParam;
-
-        const std::string paramClsName = clsName + std::string("Parameter");
-
-
-        bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
-            .def("__init__", make_constructor(&pyGridSearchParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
-        ;
-
-        bp::class_<PyLearner>( clsName.c_str(), bp::no_init )
-        .def("__init__", make_constructor(&pyGridSearchConstructor<PyLearner> ,boost::python::default_call_policies()))
-        .def(LearnerInferenceSuite<PyLearner>())
-        ;
-    }
-
-    template void 
-    export_grid_search_learner<op::GmAdderFlexibleLossDataset> (const std::string& className);
-
-    //template void 
-    //export_grid_search_learner<op::GmAdderGeneralizedHammingLossDataset> (const std::string& className);
-}
-
-
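
Assuming the exhaustive semantics of GridSearchLearner (testingPoints_ holds
the number of samples per weight dimension), the search cost grows
multiplicatively, with inference run on every dataset instance for each weight
vector. A quick sanity check on the sketch above:

    import numpy
    nTestPoints = numpy.array([5, 5, 5])
    print(numpy.prod(nTestPoints))   # 125 weight vectors to score
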
diff --git a/src/interfaces/python/opengm/learning/pyLFunctionGen.cxx b/src/interfaces/python/opengm/learning/pyLFunctionGen.cxx
deleted file mode 100644
index e8bb186..0000000
--- a/src/interfaces/python/opengm/learning/pyLFunctionGen.cxx
+++ /dev/null
@@ -1,309 +0,0 @@
-#include <boost/python.hpp>
-#include <boost/python/module.hpp>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-#include <opengm/python/numpyview.hxx>
-
-
-#include "opengm/graphicalmodel/weights.hxx"
-#include "opengm/functions/learnable/lpotts.hxx"
-#include "opengm/functions/learnable/lunary.hxx"
-#include "opengm/functions/learnable/lweightedsum_of_functions.hxx"
-
-#include "../opengmcore/functionGenBase.hxx"
-
-
-namespace bp = boost::python;
-namespace op = opengm::python;
-namespace ol = opengm::learning;
-namespace ofl = opengm::functions::learnable;
-namespace opengm{
-
-
-
-    template<class GM_ADDER,class GM_MULT>
-    class LPottsFunctionGen :
-    public FunctionGeneratorBase<GM_ADDER,GM_MULT>
-    {
-    public:       
-        typedef typename GM_ADDER::ValueType ValueType;
-        typedef typename GM_ADDER::IndexType IndexType;
-        typedef typename GM_ADDER::LabelType LabelType;
-        typedef ol::Weights<ValueType> WeightType;
-        typedef  ofl::LPotts<ValueType, IndexType, LabelType> FType;
-
-        LPottsFunctionGen(
-            WeightType & weights,
-            const size_t numFunctions,
-            const size_t numLabels,
-            op::NumpyView<ValueType, 2> features,
-            op::NumpyView<IndexType, 1> weightIds,
-            const bool addConstFeature
-        ):
-        FunctionGeneratorBase<GM_ADDER,GM_MULT>(),
-        weights_(weights),
-        numFunctions_(numFunctions),
-        numLabels_(numLabels),
-        features_(features.view()),
-        weightIds_(weightIds.begin(), weightIds.end()),
-        addConstFeature_(addConstFeature)
-        {
-            OPENGM_CHECK_OP(features.shape(0), == , numFunctions, "wrong shape");
-            OPENGM_CHECK_OP(features.shape(1)+int(addConstFeature), == , weightIds.shape(0), "wrong shape");
-        }
- 
-
-        template<class GM>
-        std::vector< typename GM::FunctionIdentifier > * addFunctionsGeneric(GM & gm)const{
-
-            typedef typename GM::FunctionIdentifier Fid;
-            typedef std::vector<Fid> FidVector;
-            FidVector * fidVector = new FidVector(numFunctions_);
-
-            const size_t nFeat =features_.shape(1);
-            std::vector<ValueType> fFeat(nFeat+int(addConstFeature_));
-            for(size_t  i=0;i<numFunctions_;++i){
-                for(size_t f=0; f<nFeat; ++f){
-                    fFeat[f] = features_(i,f);
-                }
-                if(addConstFeature_){
-                    fFeat[nFeat] = 1.0;
-                }
-                const FType f(weights_, numLabels_, weightIds_, fFeat);
-                (*fidVector)[i] = gm.addFunction(f);
-            }   
-            return fidVector;
-        }
-
-        virtual std::vector< typename GM_ADDER::FunctionIdentifier > * addFunctions(GM_ADDER & gm)const{
-            return this-> template addFunctionsGeneric<GM_ADDER>(gm);
-        }
-        virtual std::vector< typename GM_MULT::FunctionIdentifier >  * addFunctions(GM_MULT & gm)const{
-            throw RuntimeError("Wrong Operator for Learning");
-            return NULL;
-        }
-    private:
-        WeightType & weights_;
-        size_t numFunctions_;
-        size_t numLabels_;
-        marray::Marray<ValueType>  features_;
-        std::vector<size_t>  weightIds_; 
-        bool addConstFeature_;
-    };
-
-
-
-    template<class GM_ADDER,class GM_MULT>
-    class LUnarySharedFeatFunctionGen :
-    public FunctionGeneratorBase<GM_ADDER,GM_MULT>
-    {
-    public:       
-        typedef typename GM_ADDER::ValueType ValueType;
-        typedef typename GM_ADDER::IndexType IndexType;
-        typedef typename GM_ADDER::LabelType LabelType;
-        typedef ol::Weights<ValueType> WeightType;
-        typedef  ofl::LUnary<ValueType, IndexType, LabelType> FType;
-
-        LUnarySharedFeatFunctionGen(
-            WeightType & weights,
-            const size_t numFunctions,
-            const size_t numLabels,
-            op::NumpyView<ValueType, 2> & features,
-            op::NumpyView<IndexType, 2> & weightIds,
-            const bool makeFirstEntryConst,
-            const bool addConstFeature
-        ):
-        FunctionGeneratorBase<GM_ADDER,GM_MULT>(),
-        weights_(weights),
-        numFunctions_(numFunctions),
-        numLabels_(numLabels),
-        features_(features.view()),
-        makeFirstEntryConst_(makeFirstEntryConst),
-        addConstFeature_(addConstFeature)
-        {
-            OPENGM_CHECK_OP(features.shape(0), == , numFunctions, "wrong shape");
-            OPENGM_CHECK_OP(weightIds.shape(1), == , features.shape(1) + int(addConstFeature), "wrong shape");
-            OPENGM_CHECK_OP(weightIds.shape(0)+int(makeFirstEntryConst), == ,numLabels, "wrong shape");
-
-
-            const size_t nFeat =features_.shape(1);
-            const size_t nWPerL = nFeat+int(addConstFeature_);
-            const size_t wShape[2] = {numLabels_- int(makeFirstEntryConst_) ,nWPerL};
-
-            wIds_ = marray::Marray<size_t>(wShape, wShape+2);
-
-            // copy the passed weight ids into the marray owned by this generator
-            for(size_t ll=0; ll<wShape[0]; ++ll){
-                for(size_t wi=0; wi<wShape[1]; ++wi){
-                    wIds_(ll,wi) = weightIds(ll,wi);
-                }
-            }
-        }
- 
-
-        template<class GM>
-        std::vector< typename GM::FunctionIdentifier > * addFunctionsGeneric(GM & gm)const{
-            typedef typename GM::FunctionIdentifier Fid;
-            typedef std::vector<Fid> FidVector;
-            FidVector * fidVector = new FidVector(numFunctions_);
-
-
-            const size_t nFeat =features_.shape(1);
-            const size_t nWPerL = nFeat+int(addConstFeature_);
-            marray::Marray<ValueType> fFeat(&nWPerL,&nWPerL+1);
-
-
-            for(size_t  i=0;i<numFunctions_;++i){
-                // copy the features for that instance
-                for(size_t f=0; f<nFeat; ++f){
-                    fFeat(f) = features_(i,f);
-                }
-                if(addConstFeature_){
-                    fFeat(nFeat) = 1.0;
-                }
-                FType f(weights_, numLabels_, wIds_, fFeat, makeFirstEntryConst_);
-
-
-                (*fidVector)[i] = gm.addFunction(f);
-            }   
-            return fidVector;
-        }
-
-        virtual std::vector< typename GM_ADDER::FunctionIdentifier > * addFunctions(GM_ADDER & gm)const{
-            return this-> template addFunctionsGeneric<GM_ADDER>(gm);
-        }
-        virtual std::vector< typename GM_MULT::FunctionIdentifier >  * addFunctions(GM_MULT & gm)const{
-            throw RuntimeError("Wrong Operator for Learning");
-            return NULL;
-        }
-    private:
-        WeightType & weights_;
-        size_t numFunctions_;
-        size_t numLabels_;
-
-        marray::Marray<ValueType> features_;
-        bool makeFirstEntryConst_;
-        bool addConstFeature_;
-        marray::Marray<size_t> wIds_;
-    };
-
-
-    template<class GM_ADDER,class GM_MULT>
-    FunctionGeneratorBase<GM_ADDER,GM_MULT> * lunarySharedFeatFunctionGen(
-        ol::Weights<typename GM_ADDER::ValueType> & weights,
-        const size_t numFunctions,
-        const size_t numLabels,
-        opengm::python::NumpyView<typename GM_ADDER::ValueType,2> features,
-        opengm::python::NumpyView<typename GM_ADDER::IndexType,2> weightIds,
-        const bool makeFirstEntryConst,
-        const bool addConstFeature
-    ){
-        FunctionGeneratorBase<GM_ADDER,GM_MULT> * ptr = 
-            new LUnarySharedFeatFunctionGen<GM_ADDER,GM_MULT>(weights,numFunctions,numLabels,
-                                                              features,weightIds,makeFirstEntryConst,
-                                                              addConstFeature);
-        return ptr;
-    }
-
-
-    template<class GM_ADDER,class GM_MULT>
-    FunctionGeneratorBase<GM_ADDER,GM_MULT> * lpottsFunctionGen(
-        ol::Weights<typename GM_ADDER::ValueType> & weights,
-        const size_t numFunctions,
-        const size_t numLabels,
-        opengm::python::NumpyView<typename GM_ADDER::ValueType,2> features,
-        opengm::python::NumpyView<typename GM_ADDER::IndexType,1> weightIds,
-        const bool addConstFeature
-    ){
-        FunctionGeneratorBase<GM_ADDER,GM_MULT> * ptr = 
-            new LPottsFunctionGen<GM_ADDER,GM_MULT>(weights,numFunctions,numLabels,features,weightIds, addConstFeature);
-        return ptr;
-    }
-
-    template<class GM_ADDER,class GM_MULT>  
-    void export_lfunction_generator(){
-        typedef LPottsFunctionGen<GM_ADDER, GM_MULT> FGen;
-
-         bp::def("_lpottsFunctionsGen",&lpottsFunctionGen<GM_ADDER,GM_MULT>,
-                bp::return_value_policy<bp::manage_new_object>(),
-            (
-                bp::arg("weights"),
-                bp::arg("numFunctions"),
-                bp::arg("numLabels"),
-                bp::arg("features"),
-                bp::arg("weightIds"),
-                bp::arg("addConstFeature")
-            )
-        );
-
-         bp::def("_lunarySharedFeatFunctionsGen",&lunarySharedFeatFunctionGen<GM_ADDER,GM_MULT>,
-                bp::with_custodian_and_ward_postcall<0, 4, bp::return_value_policy<bp::manage_new_object> >(),
-            (
-                bp::arg("weights"),
-                bp::arg("numFunctions"),
-                bp::arg("numLabels"),
-                bp::arg("features"),
-                bp::arg("weightIds"),
-                bp::arg("makeFirstEntryConst"),
-                bp::arg("addConstFeature")
-            )
-        );
-
-    }
-
-
-}
-
-
-template void opengm::export_lfunction_generator<op::GmAdder,op::GmMultiplier>();
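
The two generators above were batch factories: one LPotts or LUnary function
per row of the feature matrix, all sharing the same weight vector. A sketch of
the Potts variant as exported (the attribute path through the opengm.learning
wrapper is assumed):

    import numpy
    import opengm
    from opengm import learning

    weights  = learning.Weights(numpy.zeros(2, dtype=opengm.value_type))
    features = numpy.random.rand(100, 1).astype(opengm.value_type)
    wIds     = numpy.array([0, 1], dtype=opengm.index_type)

    # 100 learnable Potts functions; the second weight id belongs to the
    # constant feature appended because addConstFeature=True
    gen = learning._lpottsFunctionsGen(weights=weights, numFunctions=100,
                                       numLabels=2, features=features,
                                       weightIds=wIds, addConstFeature=True)
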
diff --git a/src/interfaces/python/opengm/learning/pyLoss.cxx b/src/interfaces/python/opengm/learning/pyLoss.cxx
deleted file mode 100644
index 951559e..0000000
--- a/src/interfaces/python/opengm/learning/pyLoss.cxx
+++ /dev/null
@@ -1,81 +0,0 @@
-#include <boost/python.hpp>
-#include <boost/python/suite/indexing/vector_indexing_suite.hpp>
-#include <stdexcept>
-#include <stddef.h>
-
-//#include <opengm/learning/loss/hammingloss.hxx>
-//#include <opengm/learning/loss/generalized-hammingloss.hxx>
-#include <opengm/learning/loss/flexibleloss.hxx>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-
-using namespace boost::python;
-
-namespace opengm{
-    
-void pySetNodeLossMultiplier(opengm::learning::FlexibleLoss::Parameter& p,
-                             const opengm::python::NumpyView<double,1>& m)
-{
-    p.nodeLossMultiplier_ = std::vector<double>(m.begin(), m.end());
-}
-
-void pySetLabelLossMultiplier(opengm::learning::FlexibleLoss::Parameter& p,
-                             const opengm::python::NumpyView<double,1>& m)
-{
-    p.labelLossMultiplier_ = std::vector<double>(m.begin(), m.end());
-}
-void pySetFactorLossMultiplier(opengm::learning::FlexibleLoss::Parameter& p,
-                               const opengm::python::NumpyView<double,1>& m)
-{
-    p.factorLossMultiplier_ = std::vector<double>(m.begin(), m.end());
-}
-
-
-template <class GM>
-void export_loss(){
-   typedef opengm::learning::FlexibleLoss PyFlexibleLoss;
-
-    class_<PyFlexibleLoss >("FlexibleLoss")
-    ;
-
-    // learner param enum
-    enum_<PyFlexibleLoss::Parameter::LossType>("LossType")
-      .value("hamming", PyFlexibleLoss::Parameter::Hamming)
-      .value("l1",  PyFlexibleLoss::Parameter::L1)
-      .value("l2",  PyFlexibleLoss::Parameter::L2)
-      .value("partition",  PyFlexibleLoss::Parameter::Partition)
-      .value("ConfMat",  PyFlexibleLoss::Parameter::ConfMat)
-    ;
-
-
-    class_<PyFlexibleLoss::Parameter>("FlexibleLossParameter")
-        .def_readwrite("lossType", &PyFlexibleLoss::Parameter::lossType_)
-        .def("setNodeLossMultiplier", &pySetNodeLossMultiplier)
-        .def("setLabelLossMultiplier", &pySetLabelLossMultiplier)
-        .def("setFactorLossMultiplier", &pySetFactorLossMultiplier)
-    ;
-
-
-    class_<std::vector< PyFlexibleLoss::Parameter > >("FlexibleLossParameterVector")
-        .def(vector_indexing_suite<std::vector< PyFlexibleLoss::Parameter> >())
-    ;
-
-
-}
-
-
-template void export_loss<opengm::python::GmAdder>();
-
-}
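
Only the flexible loss was exported here; its type and per-node/per-label
weighting were chosen at runtime through the parameter object. A sketch of the
exported interface (again assuming the opengm.learning re-export):

    import numpy
    from opengm import learning

    p = learning.FlexibleLossParameter()
    p.lossType = learning.LossType.hamming        # or l1, l2, partition, ConfMat
    p.setNodeLossMultiplier(numpy.array([2.0, 1.0, 1.0]))
    p.setLabelLossMultiplier(numpy.array([1.0, 0.5]))
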
diff --git a/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx b/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
deleted file mode 100644
index 82fc5d0..0000000
--- a/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
+++ /dev/null
@@ -1,81 +0,0 @@
-#if defined(WITH_CPLEX) || defined(WITH_GUROBI)
-
-#include <boost/python.hpp>
-#include <boost/python/module.hpp>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-#include <opengm/python/numpyview.hxx>
-
-#include <opengm/inference/icm.hxx>
-#include <opengm/learning/maximum_likelihood_learning.hxx>
-
-#define DefaultErrorFn DefaultErrorFn_TrwsExternal_ML
-#include "helper.hxx"
-
-namespace bp = boost::python;
-namespace op = opengm::python;
-namespace ol = opengm::learning;
-
-namespace opengm{
-
-
-    template<class PARAM>
-    PARAM * pyMaxLikelihoodParamConstructor(
-        size_t maximumNumberOfIterations=100,
-        double gradientStepSize=0.1,
-        double weightStoppingCriteria=0.00000001,
-        double gradientStoppingCriteria=0.00000001,
-        bool infoFlag=true,
-        bool infoEveryStep=false,
-        double weightRegularizer = 1.0,
-        size_t beliefPropagationMaximumNumberOfIterations = 20,
-        double beliefPropagationConvergenceBound = 0.0001,
-        double beliefPropagationDamping = 0.5,
-        double beliefPropagationTemperature = 0.3,
-        opengm::Tribool beliefPropagationIsAcyclic=opengm::Tribool(opengm::Tribool::Maybe)
-    ){
-        PARAM * p  = new PARAM();
-        p->maximumNumberOfIterations_ = maximumNumberOfIterations;
-        p->gradientStepSize_ = gradientStepSize;
-        p->weightStoppingCriteria_ = weightStoppingCriteria;
-        p->gradientStoppingCriteria_ = gradientStoppingCriteria;
-        p->infoFlag_ = infoFlag;
-        p->infoEveryStep_ = infoEveryStep;
-        p->weightRegularizer_ = weightRegularizer;
-        p->beliefPropagationMaximumNumberOfIterations_ = beliefPropagationMaximumNumberOfIterations;
-        p->beliefPropagationConvergenceBound_ = beliefPropagationConvergenceBound;
-        p->beliefPropagationDamping_ = beliefPropagationDamping;
-        p->beliefPropagationTemperature_ = beliefPropagationTemperature;
-        p->beliefPropagationIsAcyclic_ = beliefPropagationIsAcyclic;
-        return p;
-    }
-
-    template<class DATASET>
-    void export_max_likelihood_learner(const std::string & clsName){
-        typedef learning::MaximumLikelihoodLearner<DATASET> PyLearner;
-        typedef typename PyLearner::Parameter PyLearnerParam;
-        typedef typename PyLearner::DatasetType DatasetType;
-
-        const std::string paramClsName = clsName + std::string("Parameter");
-
-        bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
-            .def("__init__", make_constructor(&pyMaxLikelihoodParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
-            //.def_readwrite("maxIterations", &PyLearnerParam::maximumNumberOfIterations_)
-        ;
-
-        boost::python::class_<PyLearner>( clsName.c_str(), boost::python::init<DatasetType &, const PyLearnerParam &>() )
-            .def("learn",&PyLearner::learn)
-        ;
-    }
-
-  //template void
-  //export_max_likelihood_learner<op::GmAdderHammingLossDataset> (const std::string& className);
-
-    template void
-    export_max_likelihood_learner<op::GmAdderFlexibleLossDataset> (const std::string& className);
-}
-
-
-
-#endif
-
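
This learner was only compiled when CPLEX or GUROBI was available, and, as the
commented-out export call in learning.cxx above shows, it was never actually
registered with Python. Had it been enabled, usage would have started from the
default-constructed parameter (a hypothetical sketch):

    from opengm import learning
    # hypothetical: requires export_max_likelihood_learner(...) to be
    # re-enabled in learning.cxx first
    p = learning.MaxLikelihood_FlexibleLossParameter()
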
diff --git a/src/interfaces/python/opengm/learning/pyRws.cxx b/src/interfaces/python/opengm/learning/pyRws.cxx
deleted file mode 100644
index 43bdaf9..0000000
--- a/src/interfaces/python/opengm/learning/pyRws.cxx
+++ /dev/null
@@ -1,72 +0,0 @@
-#include <boost/python.hpp>
-#include <boost/python/module.hpp>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-#include <opengm/python/numpyview.hxx>
-#include <opengm/learning/rws.hxx>
-
-#define DefaultErrorFn DefaultErrorFn_TrwsExternalRws
-#include "helper.hxx"
-
-namespace bp = boost::python;
-namespace op = opengm::python;
-namespace ol = opengm::learning;
-
-namespace opengm{
-
-
-    template<class PARAM>
-    PARAM * pyRwsParamConstructor(
-    ){
-        PARAM * p  = new PARAM();
-        return p;
-    }
-
-    template<class L >
-    L * pyRwsConstructor(
-        typename L::DatasetType & dataset,
-        const typename L::Parameter & param
-    ){
-        L * l  = new L(dataset, param);
-        return l;
-    }
-
-    template<class DATASET>
-    void export_rws_learner(const std::string & clsName){
-        typedef learning::Rws<DATASET> PyLearner;
-        typedef typename PyLearner::Parameter PyLearnerParam;
-
-        const std::string paramClsName = clsName + std::string("Parameter");
-
-
-        // learner param
-        bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
-            .def("__init__", make_constructor(&pyRwsParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
-            .def_readwrite("eps",  &PyLearnerParam::eps_)
-            .def_readwrite("maxIterations", &PyLearnerParam::maxIterations_)
-            .def_readwrite("stopLoss", &PyLearnerParam::stopLoss_)
-            .def_readwrite("learningRate", &PyLearnerParam::learningRate_)
-            .def_readwrite("C", &PyLearnerParam::C_)
-            .def_readwrite("p", &PyLearnerParam::p_)
-            .def_readwrite("sigma", &PyLearnerParam::sigma_)
-        ;
-
-
-        // learner
-        bp::class_<PyLearner>( clsName.c_str(), bp::no_init )
-        .def("__init__", make_constructor(&pyRwsConstructor<PyLearner> ,boost::python::default_call_policies()))
-        .def(LearnerInferenceSuite<PyLearner>())
-        ;
-    }
-
-    // template void 
-    // export_subgradient_ssvm_learner<op::GmAdderHammingLossDataset> (const std::string& className);
-
-    // template void 
-    // export_subgradient_ssvm_learner<op::GmAdderGeneralizedHammingLossDataset> (const std::string& className);
-
-    template void 
-    export_rws_learner<op::GmAdderFlexibleLossDataset> (const std::string& className);
-}
-
-
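
Rws exposed its hyper-parameters as plain attributes on the parameter object.
A sketch (dataset is assumed to be a DatasetWithFlexibleLoss):

    from opengm import learning

    param = learning.Rws_FlexibleLossParameter()
    param.maxIterations = 1000
    param.learningRate  = 0.1
    param.C             = 100.0
    param.sigma         = 1.0
    learner = learning.Rws_FlexibleLoss(dataset, param)
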
diff --git a/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx b/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx
deleted file mode 100644
index e8d5ba7..0000000
--- a/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx
+++ /dev/null
@@ -1,64 +0,0 @@
-#if defined(WITH_CPLEX) || defined(WITH_GUROBI)
-
-#include <boost/python.hpp>
-#include <boost/python/module.hpp>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-#include <opengm/python/numpyview.hxx>
-
-#include <opengm/inference/icm.hxx>
-#include <opengm/learning/struct-max-margin.hxx>
-
-#define DefaultErrorFn DefaultErrorFn_TrwsExternal_SMM
-#include "helper.hxx"
-
-namespace bp = boost::python;
-namespace op = opengm::python;
-namespace ol = opengm::learning;
-
-namespace opengm{
-
-
-    template<class PARAM>
-    PARAM * pyStructMaxMarginBundleParamConstructor(
-        double regularizerWeight,
-        op::GmValueType minEps,
-        unsigned int steps,
-        bool eps_from_gap = true
-    ){
-        PARAM * p  = new PARAM();
-        p->optimizerParameter_.lambda  = regularizerWeight;
-        p->optimizerParameter_.min_eps = minEps;
-        p->optimizerParameter_.steps   = steps;
-        if(eps_from_gap)
-            p->optimizerParameter_.epsStrategy = ol::BundleOptimizer<op::GmValueType>::EpsFromGap;
-        else
-            p->optimizerParameter_.epsStrategy = ol::BundleOptimizer<op::GmValueType>::EpsFromChange;
-        return p;
-    }
-
-    template<class DATASET, class OPTIMIZER>
-    void export_struct_max_margin_bundle_learner(const std::string & clsName){
-        typedef learning::StructMaxMargin<DATASET, OPTIMIZER> PyLearner;
-        typedef typename PyLearner::Parameter PyLearnerParam;
-        typedef typename PyLearner::DatasetType DatasetType;
-
-        const std::string paramClsName = clsName + std::string("Parameter");
-
-        bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
-            .def("__init__", make_constructor(&pyStructMaxMarginBundleParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
-        ;
-
-        boost::python::class_<PyLearner>( clsName.c_str(), boost::python::init<DatasetType &, const PyLearnerParam &>() )
-            .def(LearnerInferenceSuite<PyLearner>())
-        ;
-    }
-
-    template void
-    export_struct_max_margin_bundle_learner<op::GmAdderFlexibleLossDataset, ol::BundleOptimizer<op::GmValueType> > (const std::string& className);
-
-}
-
-
-
-#endif
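
The bundle learner took its optimizer settings positionally through the
parameter constructor: regularizer weight, minimal eps, number of steps, and
whether eps is derived from the duality gap. Like the max-likelihood learner
it was only built with CPLEX or GUROBI. A sketch (dataset assumed):

    from opengm import learning

    param = learning.StructMaxMargin_Bundle_FlexibleLossParameter(
        1.0,     # regularizerWeight -> BundleOptimizer lambda
        1e-6,    # minEps
        100,     # steps
        True)    # eps from gap -> EpsFromGap strategy
    learner = learning.StructMaxMargin_Bundle_FlexibleLoss(dataset, param)
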
diff --git a/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx b/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx
deleted file mode 100644
index 6e3633e..0000000
--- a/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx
+++ /dev/null
@@ -1,75 +0,0 @@
-#include <boost/python.hpp>
-#include <boost/python/module.hpp>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-#include <opengm/python/numpyview.hxx>
-#include <opengm/learning/structured_perceptron.hxx>
-
-#define DefaultErrorFn DefaultErrorFn_TrwsExternalSPerceptron
-#include "helper.hxx"
-
-namespace bp = boost::python;
-namespace op = opengm::python;
-namespace ol = opengm::learning;
-
-namespace opengm{
-
-
-    template<class PARAM>
-    PARAM * pyStructuredPerceptronParamConstructor(
-    ){
-        PARAM * p  = new PARAM();
-        return p;
-    }
-
-    template<class L >
-    L * pyStructuredPerceptronConstructor(
-        typename L::DatasetType & dataset,
-        const typename L::Parameter & param
-    ){
-        L * l  = new L(dataset, param);
-        return l;
-    }
-
-    template<class DATASET>
-    void export_struct_perceptron_learner(const std::string & clsName){
-        typedef learning::StructuredPerceptron<DATASET> PyLearner;
-        typedef typename PyLearner::Parameter PyLearnerParam;
-
-        const std::string paramClsName = clsName + std::string("Parameter");
-
-        const std::string paramEnumLearningModeName = clsName + std::string("Parameter_LearningMode");
-
-        // learner param enum
-        bp::enum_<typename PyLearnerParam::LearningMode>(paramEnumLearningModeName.c_str())
-            .value("online", PyLearnerParam::Online)
-            .value("batch", PyLearnerParam::Batch)
-        ;
-
-        // learner param
-        bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
-            .def("__init__", make_constructor(&pyStructuredPerceptronParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
-            .def_readwrite("eps",  &PyLearnerParam::eps_)
-            .def_readwrite("maxIterations", &PyLearnerParam::maxIterations_)
-            .def_readwrite("stopLoss", &PyLearnerParam::stopLoss_)
-            .def_readwrite("decayExponent", &PyLearnerParam::decayExponent_)
-            .def_readwrite("decayT0", &PyLearnerParam::decayT0_)
-            .def_readwrite("learningMode", &PyLearnerParam::learningMode_)
-        ;
-
-
-        // learner
-        bp::class_<PyLearner>( clsName.c_str(), bp::no_init )
-        .def("__init__", make_constructor(&pyStructuredPerceptronConstructor<PyLearner> ,boost::python::default_call_policies()))
-        .def(LearnerInferenceSuite<PyLearner>())
-        ;
-    }
-
-    template void 
-    export_struct_perceptron_learner<op::GmAdderFlexibleLossDataset> (const std::string& className);
-
-    // template void 
-    // export_struct_perceptron_learner<op::GmAdderGeneralizedHammingLossDataset> (const std::string& className);
-}
-
-
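
The perceptron's online/batch switch was exported as its own enum, named after
the parameter class. A sketch (dataset assumed; the decay schedule itself is
defined in structured_perceptron.hxx, not here):

    from opengm import learning

    param = learning.StructPerceptron_FlexibleLossParameter()
    param.eps           = 1e-5
    param.maxIterations = 1000
    param.decayExponent = -1.0
    param.learningMode  = learning.StructPerceptron_FlexibleLossParameter_LearningMode.online
    learner = learning.StructPerceptron_FlexibleLoss(dataset, param)
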
diff --git a/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx b/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
deleted file mode 100644
index 00d5a26..0000000
--- a/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
+++ /dev/null
@@ -1,80 +0,0 @@
-#include <boost/python.hpp>
-#include <boost/python/module.hpp>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-#include <opengm/python/numpyview.hxx>
-#include <opengm/learning/subgradient_ssvm.hxx>
-
-#define DefaultErrorFn DefaultErrorFn_TrwsExternalSubgradientSSVM
-#include "helper.hxx"
-
-namespace bp = boost::python;
-namespace op = opengm::python;
-namespace ol = opengm::learning;
-
-namespace opengm{
-
-
-    template<class PARAM>
-    PARAM * pySubgradientSSVMParamConstructor(
-    ){
-        PARAM * p  = new PARAM();
-        return p;
-    }
-
-    template<class L >
-    L * pySubgradientSSVMConstructor(
-        typename L::DatasetType & dataset,
-        const typename L::Parameter & param
-    ){
-        L * l  = new L(dataset, param);
-        return l;
-    }
-
-    template<class DATASET>
-    void export_subgradient_ssvm_learner(const std::string & clsName){
-        typedef learning::SubgradientSSVM<DATASET> PyLearner;
-        typedef typename PyLearner::Parameter PyLearnerParam;
-
-        const std::string paramClsName = clsName + std::string("Parameter");
-
-        const std::string paramEnumLearningModeName = clsName + std::string("Parameter_LearningMode");
-
-        // learner param enum
-        bp::enum_<typename PyLearnerParam::LearningMode>(paramEnumLearningModeName.c_str())
-            .value("online", PyLearnerParam::Online)
-            .value("batch", PyLearnerParam::Batch)
-        ;
-
-        // learner param
-        bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
-            .def("__init__", make_constructor(&pySubgradientSSVMParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
-            .def_readwrite("eps",  &PyLearnerParam::eps_)
-            .def_readwrite("maxIterations", &PyLearnerParam::maxIterations_)
-            .def_readwrite("stopLoss", &PyLearnerParam::stopLoss_)
-            .def_readwrite("learningRate", &PyLearnerParam::learningRate_)
-            .def_readwrite("C", &PyLearnerParam::C_)
-            .def_readwrite("learningMode", &PyLearnerParam::learningMode_)
-            .def_readwrite("averaging", &PyLearnerParam::averaging_)
-            .def_readwrite("nConf", &PyLearnerParam::nConf_)
-        ;
-
-
-        // learner
-        bp::class_<PyLearner>( clsName.c_str(), bp::no_init )
-        .def("__init__", make_constructor(&pySubgradientSSVMConstructor<PyLearner> ,boost::python::default_call_policies()))
-        .def(LearnerInferenceSuite<PyLearner>())
-        ;
-    }
-
-    // template void 
-    // export_subgradient_ssvm_learner<op::GmAdderHammingLossDataset> (const std::string& className);
-
-    // template void 
-    // export_subgradient_ssvm_learner<op::GmAdderGeneralizedHammingLossDataset> (const std::string& className);
-
-    template void 
-    export_subgradient_ssvm_learner<op::GmAdderFlexibleLossDataset> (const std::string& className);
-}
-
-
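
The subgradient SSVM followed the same pattern, with regularization and weight
averaging knobs on top. A sketch (attribute values are placeholders, dataset
assumed):

    from opengm import learning

    param = learning.SubgradientSSVM_FlexibleLossParameter()
    param.C            = 100.0
    param.learningRate = 1.0
    param.averaging    = -1
    param.nConf        = 0
    param.learningMode = learning.SubgradientSSVM_FlexibleLossParameter_LearningMode.batch
    learner = learning.SubgradientSSVM_FlexibleLoss(dataset, param)
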
diff --git a/src/interfaces/python/opengm/learning/pyWeights.cxx b/src/interfaces/python/opengm/learning/pyWeights.cxx
deleted file mode 100644
index 10afc6e..0000000
--- a/src/interfaces/python/opengm/learning/pyWeights.cxx
+++ /dev/null
@@ -1,46 +0,0 @@
-#include <boost/python.hpp>
-#include <boost/python/module.hpp>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-#include <opengm/python/numpyview.hxx>
-
-
-
-namespace opengm{
-
-    template<class V>
-    learning::Weights<V>  * pyWeightsConstructor(
-        python::NumpyView<V, 1> values                                           
-    ){
-        learning::Weights<V>   * f = new learning::Weights<V> (values.shape(0));
-        for(size_t i=0; i<values.shape(0); ++i){
-            f->setWeight(i, values(i));
-        }
-        return f;
-    }
-
-
-
-    void export_weights(){
-        typedef  python::GmValueType V;
-        typedef learning::Weights<V> Weights;
-        boost::python::class_<Weights>("Weights",boost::python::init<const size_t >())
-            .def("__init__", make_constructor(&pyWeightsConstructor<V> ,boost::python::default_call_policies()))
-            .def("__getitem__", &Weights::getWeight)
-            .def("__setitem__", &Weights::setWeight)
-            .def("__len__", &Weights::numberOfWeights)
-        ;
-    }
-
-    void export_weight_constraints(){
-        typedef  python::GmValueType V;
-        typedef learning::WeightConstraints<V> PyWeightConstraints;
-        boost::python::class_<PyWeightConstraints>("WeightConstraints",boost::python::init<const size_t >())
-            //.def("__init__", make_constructor(&pyWeightsConstructor<V> ,boost::python::default_call_policies()))
-            //.def("__getitem__", &Weights::getWeight)
-            //.def("__setitem__", &Weights::setWeight)
-        ;
-    }
-
-
-}
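
From Python, Weights behaved like a small fixed-size sequence; this sketch is
determined by the defs above (only the opengm.learning re-export is assumed):

    import numpy
    import opengm
    from opengm import learning

    w = learning.Weights(numpy.array([0.0, 0.5, 1.0], dtype=opengm.value_type))
    w[1] = 0.7                   # __setitem__ -> setWeight
    print(w[1], len(w))          # __getitem__ -> getWeight, __len__ -> numberOfWeights
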
diff --git a/src/interfaces/python/opengm/opengmcore/__init__.py b/src/interfaces/python/opengm/opengmcore/__init__.py
index 15f6ad7..ab17277 100644
--- a/src/interfaces/python/opengm/opengmcore/__init__.py
+++ b/src/interfaces/python/opengm/opengmcore/__init__.py
@@ -1,5 +1,4 @@
 from _opengmcore import *
-from _opengmcore import _gridVis2d
 from factorSubset import FactorSubset
 from gm_injector import _extend_gm_classes
 from factor_injector import _extend_factor_classes
@@ -194,15 +193,14 @@ class Multiplier:
    def neutral(self):
       return float(1.0)
 
-
-def gridVis(shape, numpyOrder=True):
-    assert len(shape) == 2
-    nFac = (shape[0]-1)*shape[1] + (shape[1]-1)*shape[0]
-    out = numpy.ones([nFac,2], dtype=index_type)
-    _gridVis2d(shape[0],shape[1],numpyOrder, out)
-    return out
-
-
+
+def modelViewFunction(factor):
+  class _ModelViewFunction:
+    def __init__(self,factor):
+      self.factor=factor
+    def __call__(self,labeling):
+      return self.factor[labeling]
+  return PythonFunction( _ModelViewFunction(factor) ,factor.shape.__tuple__())
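
modelViewFunction wraps an existing factor as a PythonFunction with the same
shape, so a factor's values can be reused as a new function, e.g. (a sketch;
gm is an existing graphical model):

    import opengm
    f   = opengm.modelViewFunction(gm[0])   # view factor 0 as a function
    fid = gm.addFunction(f)
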
 
 #Model generators
 def grid2d2Order(unaries,regularizer,order='numpy',operator='adder'):
@@ -353,6 +351,7 @@ _TruncatedSquaredDifferenceFunction  = TruncatedSquaredDifferenceFunction
 _PottsFunction                       = PottsFunction
 _PottsNFunction                      = PottsNFunction
 _PottsGFunction                      = PottsGFunction
+_PythonFunction                      = PythonFunction
 _FactorSubset                        = FactorSubset
 
 
diff --git a/src/interfaces/python/opengm/opengmcore/function_injector.py b/src/interfaces/python/opengm/opengmcore/function_injector.py
index f31c565..bf0f832 100644
--- a/src/interfaces/python/opengm/opengmcore/function_injector.py
+++ b/src/interfaces/python/opengm/opengmcore/function_injector.py
@@ -1,11 +1,11 @@
 from _opengmcore import ExplicitFunction,SparseFunction, \
                         TruncatedAbsoluteDifferenceFunction, \
                         TruncatedSquaredDifferenceFunction,PottsFunction,PottsNFunction, \
-                        PottsGFunction,\
+                        PottsGFunction,PythonFunction,\
                         ExplicitFunctionVector,SparseFunctionVector, \
                         TruncatedAbsoluteDifferenceFunctionVector, \
                         TruncatedSquaredDifferenceFunctionVector,PottsFunctionVector,PottsNFunctionVector, \
-                        PottsGFunctionVector
+                        PottsGFunctionVector,PythonFunctionVector
 import numpy
 
 
@@ -25,8 +25,8 @@ def _extend_function_vector_classes():
     function_vector_classes=[   ExplicitFunctionVector,SparseFunctionVector,
                                 TruncatedAbsoluteDifferenceFunctionVector,
                                 TruncatedSquaredDifferenceFunctionVector,PottsFunctionVector,
-                                PottsNFunctionVector,PottsGFunctionVector
-                                 ]  
+                                PottsNFunctionVector,PottsGFunctionVector,
+                                PythonFunctionVector ]  
 
     for function_vector in function_vector_classes:
         class InjectorGenericFunctionVector(object):
@@ -50,7 +50,8 @@ def _extend_function_type_classes():
   function_classes=[ExplicitFunction,SparseFunction,
                     TruncatedAbsoluteDifferenceFunction,
                     TruncatedSquaredDifferenceFunction,PottsFunction,
-                    PottsNFunction,PottsGFunction]
+                    PottsNFunction,PottsGFunction,
+                    PythonFunction]
 
 
 
diff --git a/src/interfaces/python/opengm/opengmcore/opengmcore.cpp b/src/interfaces/python/opengm/opengmcore/opengmcore.cpp
index dee2b1e..c63aae1 100644
--- a/src/interfaces/python/opengm/opengmcore/opengmcore.cpp
+++ b/src/interfaces/python/opengm/opengmcore/opengmcore.cpp
@@ -14,8 +14,6 @@
 #include <opengm/utilities/tribool.hxx>
 #include <opengm/inference/inference.hxx>
 
-
-
 #include <opengm/python/opengmpython.hxx>
 #include <opengm/python/converter.hxx>
 #include <opengm/python/numpyview.hxx>
@@ -278,37 +276,6 @@ GM *  pyPottsModel3d(
 
 }
 
-
-
-void gridVis2d(
-    const size_t dx,
-    const size_t dy,
-    const bool numpyOrder,
-    opengm::python::NumpyView< opengm::python::GmIndexType, 2> visarray
-){
-    size_t shape[2]={dx,dy};
-    CoordToVi toVi(shape,shape+2,numpyOrder);
-
-    size_t c=0;
-
-    for(size_t x=0; x<dx;++x)
-    for(size_t y=0; y<dy;++y){
-
-        if(x+1<dx){
-            visarray(c,0) = toVi(x,y);
-            visarray(c,1) = toVi(x+1,y);
-            ++c;
-        }
-        if(y+1<dy){
-            visarray(c,0) = toVi(x,y);
-            visarray(c,1) = toVi(x,y+1);
-            ++c;
-        }
-    }
-}
-
-
-
 void  makeMaskedState(
     opengm::python::NumpyView< opengm::UInt32Type, 3> mask,
     opengm::python::NumpyView< opengm::UInt64Type, 1> arg,
@@ -568,34 +535,6 @@ void dequePushBack(
 }
 
 
-template<class V>
-opengm::learning::Weights<V>  * pyWeightsConstructor(
-    opengm::python::NumpyView<V, 1> values                                           
-){
-    opengm::learning::Weights<V>   * f = new opengm::learning::Weights<V> (values.shape(0));
-    for(size_t i=0; i<values.shape(0); ++i){
-        f->setWeight(i, values(i));
-    }
-    return f;
-}
-
-
-
-template<class V>
-void pyExportWeights(const std::string & clsName){
-
-    typedef opengm::learning::Weights<V> Weights;
-
-    boost::python::class_<Weights>(clsName.c_str(),boost::python::init<const size_t >())
-
-        .def("__init__", make_constructor(&pyWeightsConstructor<V> ,boost::python::default_call_policies()))
-        .def("__getitem__", &Weights::getWeight)
-        .def("__setitem__", &Weights::setWeight)
-    ;
-
-}
-
-
 
 BOOST_PYTHON_MODULE_INIT(_opengmcore) {
    Py_Initialize();
@@ -713,7 +652,7 @@ BOOST_PYTHON_MODULE_INIT(_opengmcore) {
    }
 
 
-   boost::python::def("_gridVis2d",&gridVis2d);
+
 
    //export_rag();
    export_config();
@@ -742,6 +681,7 @@ BOOST_PYTHON_MODULE_INIT(_opengmcore) {
 
       export_potts_model_3d<opengm::python::GmAdder>();
       export_potts_model_3d_masked<opengm::python::GmAdder>();
+
    }
    //multiplier
    {
diff --git a/src/interfaces/python/opengm/opengmcore/pyFunctionGen.cxx b/src/interfaces/python/opengm/opengmcore/pyFunctionGen.cxx
index 2d138f6..c9f9d3a 100644
--- a/src/interfaces/python/opengm/opengmcore/pyFunctionGen.cxx
+++ b/src/interfaces/python/opengm/opengmcore/pyFunctionGen.cxx
@@ -117,63 +117,6 @@ private:
 };
 
 
-template<class GM_ADDER,class GM_MULT,class FUNCTION_TYPE>
-class LPottsFunctionGen :
-public FunctionGeneratorBase<GM_ADDER,GM_MULT>
-{
-public:
-    typedef FUNCTION_TYPE FunctionType;
-    typedef typename FUNCTION_TYPE::ValueType ValueType;
-    typedef typename FUNCTION_TYPE::IndexType IndexType;
-    typedef typename FUNCTION_TYPE::LabelType LabelType;
-    LPottsFunctionGen(
-
-        size_t numberOfLabels,
-        opengm::python::NumpyView<LabelType,2> numLabels1Array,
-        opengm::python::NumpyView<LabelType,1> numLabels2Array,
-        opengm::python::NumpyView<ValueType,1> valEqualArray,
-        opengm::python::NumpyView<ValueType,1> valNotEqualArray
-    ):FunctionGeneratorBase<GM_ADDER,GM_MULT>(),
-    numLabels1Array_(numLabels1Array),
-    numLabels2Array_(numLabels2Array),
-    valEqualArray_(valEqualArray),
-    valNotEqualArray_(valNotEqualArray)
-    {
-        numFunctions_=std::max( 
-            std::max(numLabels1Array_.shape(0),numLabels2Array_.shape(0)) , 
-            std::max(valEqualArray_.shape(0),valNotEqualArray_.shape(0))
-        );
-    }  
-
-   template<class GM>
-   std::vector< typename GM::FunctionIdentifier > * addFunctionsGeneric(GM & gm)const{
-      std::vector< typename GM::FunctionIdentifier > * fidVector = new std::vector< typename GM::FunctionIdentifier > (numFunctions_);
-      for(size_t  i=0;i<numFunctions_;++i){
-         const LabelType numL1=i<numLabels1Array_.size() ? numLabels1Array_(i) : numLabels1Array_(numLabels1Array_.size()-1);
-         const LabelType numL2=i<numLabels2Array_.size() ? numLabels2Array_(i) : numLabels2Array_(numLabels2Array_.size()-1);
-         const ValueType veq=i<valEqualArray_.size() ? valEqualArray_(i) : valEqualArray_(valEqualArray_.size()-1);
-         const ValueType vneq=i<valNotEqualArray_.size() ? valNotEqualArray_(i) : valNotEqualArray_(valNotEqualArray_.size()-1);
-         (*fidVector)[i]=gm.addFunction(FunctionType(numL1,numL2,veq,vneq));
-      }
-      return fidVector;
-   }
-    
-   virtual std::vector< typename GM_ADDER::FunctionIdentifier > * addFunctions(GM_ADDER & gm)const{
-      return this-> template addFunctionsGeneric<GM_ADDER>(gm);
-   }
-   virtual std::vector< typename GM_MULT::FunctionIdentifier >  * addFunctions(GM_MULT & gm)const{
-      return this-> template addFunctionsGeneric<GM_MULT>(gm);
-   }
-private:
-   opengm::python::NumpyView<LabelType,1>  numLabels1Array_;
-   opengm::python::NumpyView<LabelType,1>  numLabels2Array_;
-   opengm::python::NumpyView<ValueType,1>  valEqualArray_;
-   opengm::python::NumpyView<ValueType,1>  valNotEqualArray_;
-   size_t numFunctions_;
-};
-
-
-
 template<class GM_ADDER,class GM_MULT,class FUNCTION>
 inline FunctionGeneratorBase<GM_ADDER,GM_MULT> * pottsFunctionGen(
     opengm::python::NumpyView<typename GM_ADDER::LabelType,1> numLabels1Array,
diff --git a/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx b/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx
index f5a343a..6571b3d 100644
--- a/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx
+++ b/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx
@@ -8,10 +8,10 @@
 #include <map>
 
 #include "nifty_iterator.hxx"
-#include "opengm/python/opengmpython.hxx"
-#include "opengm/python/converter.hxx"
-#include "opengm/python/numpyview.hxx"
-#include "opengm/python/pythonfunction.hxx"
+#include <opengm/python/opengmpython.hxx>
+#include <opengm/python/converter.hxx>
+#include <opengm/python/numpyview.hxx>
+#include <opengm/python/pythonfunction.hxx>
 
 #include "copyhelper.hxx"
 
@@ -26,9 +26,7 @@
 #include "opengm/functions/truncated_squared_difference.hxx"
 #include "opengm/functions/sparsemarray.hxx"
 
-#include "opengm/functions/learnable/lpotts.hxx"
-#include "opengm/functions/learnable/lunary.hxx"
-#include "opengm/functions/learnable/lweightedsum_of_functions.hxx"
+
 
 
 using namespace boost::python;
@@ -171,146 +169,6 @@ namespace pyfunction{
       return f;
    }
 
-
-
-
-   template<class FUNCTION>
-   FUNCTION * lPottsConstructor(
-        opengm::python::PyWeights & pyWeights,
-        const opengm::python::GmLabelType numberOfLabels,
-        opengm::python::NumpyView<opengm::python::GmIndexType,1> weightIds,
-        opengm::python::NumpyView<opengm::python::GmValueType,1> features
-
-    ){
-      FUNCTION * f = NULL;
-      
-      std::vector<size_t>      weightIdVec(weightIds.begin(), weightIds.end());
-      std::vector<opengm::python::GmValueType> featureVec(features.begin(), features.end());
-
-      f = new FUNCTION(pyWeights, numberOfLabels, weightIdVec, featureVec);
-      return f;
-   }
-
-
-    template<class FUNCTION>
-    FUNCTION * lUnaryConstructor(
-        opengm::python::PyWeights & pyWeights,
-        const opengm::python::GmLabelType numberOfLabels,
-        opengm::python::NumpyView<opengm::python::GmIndexType,2> weightIds,
-        opengm::python::NumpyView<opengm::python::GmValueType,2> features
-    ){
-        FUNCTION * f = NULL;
-        typedef opengm::functions::learnable::FeaturesAndIndices<
-            opengm::python::GmValueType,
-            opengm::python::GmIndexType
-        > FI;
-        typedef std::vector<FI> FI_VEC;
-
-        size_t fPerL = weightIds.shape(1);
-
-        OPENGM_CHECK_OP(weightIds.shape(0), <=, numberOfLabels,   "wrong shapes");
-        OPENGM_CHECK_OP(weightIds.shape(0), >=, numberOfLabels-1,   "wrong shapes");
-        OPENGM_CHECK_OP(weightIds.shape(0), ==, features.shape(0),"wrong shapes");
-        OPENGM_CHECK_OP(weightIds.shape(1), ==, features.shape(1),"wrong shapes");
-
-        FI_VEC fiVec(numberOfLabels);
-
-        const size_t weightShape0 =  weightIds.shape(0);
-        for(size_t l=0; l<weightShape0; ++l){
-            fiVec[l].weightIds.resize(fPerL);
-            fiVec[l].features.resize(fPerL);
-            for(size_t i=0; i<fPerL; ++i){
-                fiVec[l].weightIds[i] = weightIds(l, i);
-                fiVec[l].features[i] = features(l, i);
-            }
-        }
-        //std::cout<<"done on python side\n";
-        f = new FUNCTION(pyWeights, fiVec);
-        return f;
-    }
-
-    template<class FUNCTION>
-    FUNCTION * lUnaryConstructorList(
-        opengm::python::PyWeights & pyWeights,
-        const opengm::python::GmLabelType numberOfLabels,
-        boost::python::list weightIds,
-        boost::python::list features
-    ){
-
-        typedef opengm::python::NumpyView<opengm::python::GmIndexType,1> IndexArray;
-        typedef opengm::python::NumpyView<opengm::python::GmValueType,1> ValueArray;
-
-
-        OPENGM_CHECK_OP(boost::python::len(weightIds), == ,numberOfLabels ,"length of weightIds must be numberOfLabels");
-        OPENGM_CHECK_OP(boost::python::len(weightIds), == ,boost::python::len(features) ,"weightIds must be as long as features");
-
-
-        FUNCTION * f = NULL;
-        typedef opengm::functions::learnable::FeaturesAndIndices<
-            opengm::python::GmValueType,
-            opengm::python::GmIndexType
-        > FI;
-        typedef std::vector<FI> FI_VEC;
-
-        FI_VEC fiVec(numberOfLabels);
-
-        for(size_t l=0; l<numberOfLabels; ++l){
-
-            std::cout<<"extr. l "<<l<<"\n";
-            boost::python::extract<boost::python::numeric::array> eW(weightIds[l]);
-            boost::python::extract<boost::python::numeric::array> eF(features[l]);
-
-            IndexArray wId = eW();
-            ValueArray fs = eF();
-
-            std::cout<<"done\n";
-
-            OPENGM_CHECK_OP(wId.shape(0), ==, fs.shape(0), 
-                "for one label the number of features and the number of weights must be the same");
-
-            const size_t fPerL = wId.shape(0);
-            fiVec[l].weightIds.resize(fPerL);
-            fiVec[l].features.resize(fPerL);
-
-            for(size_t i=0; i<fPerL; ++i){
-                fiVec[l].weightIds[i] = wId(i);
-                fiVec[l].features[i] = fs(i);
-            }
-        }
-        f = new FUNCTION(pyWeights, fiVec);
-        return f;
-   }
-
-    template<class FUNCTION>
-    FUNCTION * weightedSumOfFunctionsConstructor(
-        boost::python::object pyShape,
-        opengm::python::PyWeights& pyWeights,
-        opengm::python::NumpyView<opengm::python::GmIndexType,1> weightIds,
-        opengm::python::NumpyView<opengm::python::GmValueType,3> features
-    ){
-        stl_input_iterator<int> begin(pyShape), end;
-        std::vector<opengm::python::GmLabelType> shape(begin, end);
-        std::vector<size_t> weightIdVec(weightIds.begin(), weightIds.end());
-        std::vector<marray::Marray<opengm::python::GmValueType> > featureVec;
-        for(size_t i = 0; i < features.shape(0); i++)
-        {
-            featureVec.push_back(marray::Marray<opengm::python::GmValueType>(features.getSliceView(0, i)));
-        }
-
-        FUNCTION * f = NULL;
-
-        OPENGM_CHECK_OP(weightIdVec.size(), ==, featureVec.size(),"wrong shapes");
-        if(weightIdVec.size() > 0)
-        {
-            OPENGM_CHECK_OP(shape[0], ==, featureVec[0].shape(0),"wrong feature array shapes");
-            OPENGM_CHECK_OP(shape[1], ==, featureVec[0].shape(1),"wrong feature array shapes");
-        }
-
-        f = new FUNCTION(shape, pyWeights, weightIdVec, featureVec);
-        return f;
-    }
-
-
    ////////////////////////////////////////
    // EXPLICIT FUNCTION
    ////////////////////////////////////////
@@ -467,19 +325,17 @@ void export_functiontypes(){
    typedef IndexType LabelType;
 
    // different function types
-   typedef opengm::ExplicitFunction                                <ValueType,IndexType,LabelType> PyExplicitFunction;
-   typedef opengm::PottsFunction                                   <ValueType,IndexType,LabelType> PyPottsFunction;
-   typedef opengm::PottsNFunction                                  <ValueType,IndexType,LabelType> PyPottsNFunction;
-   typedef opengm::PottsGFunction                                  <ValueType,IndexType,LabelType> PyPottsGFunction;
-   typedef opengm::AbsoluteDifferenceFunction                      <ValueType,IndexType,LabelType> PyAbsoluteDifferenceFunction;
-   typedef opengm::TruncatedAbsoluteDifferenceFunction             <ValueType,IndexType,LabelType> PyTruncatedAbsoluteDifferenceFunction;
-   typedef opengm::SquaredDifferenceFunction                       <ValueType,IndexType,LabelType> PySquaredDifferenceFunction;
-   typedef opengm::TruncatedSquaredDifferenceFunction              <ValueType,IndexType,LabelType> PyTruncatedSquaredDifferenceFunction;
-   typedef opengm::SparseFunction                                  <ValueType,IndexType,LabelType> PySparseFunction; 
-   typedef opengm::functions::learnable::LPotts                    <ValueType,IndexType,LabelType> PyLPottsFunction;
-   typedef opengm::functions::learnable::LUnary                    <ValueType,IndexType,LabelType> PyLUnaryFunction;
-   typedef opengm::functions::learnable::LWeightedSumOfFunctions   <ValueType,IndexType,LabelType> PyLSumOfWeightedFunction;
-
+   typedef opengm::ExplicitFunction                      <ValueType,IndexType,LabelType> PyExplicitFunction;
+   typedef opengm::PottsFunction                         <ValueType,IndexType,LabelType> PyPottsFunction;
+   typedef opengm::PottsNFunction                        <ValueType,IndexType,LabelType> PyPottsNFunction;
+   typedef opengm::PottsGFunction                        <ValueType,IndexType,LabelType> PyPottsGFunction;
+   typedef opengm::AbsoluteDifferenceFunction            <ValueType,IndexType,LabelType> PyAbsoluteDifferenceFunction;
+   typedef opengm::TruncatedAbsoluteDifferenceFunction   <ValueType,IndexType,LabelType> PyTruncatedAbsoluteDifferenceFunction;
+   typedef opengm::SquaredDifferenceFunction             <ValueType,IndexType,LabelType> PySquaredDifferenceFunction;
+   typedef opengm::TruncatedSquaredDifferenceFunction    <ValueType,IndexType,LabelType> PyTruncatedSquaredDifferenceFunction;
+   typedef opengm::SparseFunction                        <ValueType,IndexType,LabelType> PySparseFunction; 
+   typedef opengm::python::PythonFunction                <ValueType,IndexType,LabelType> PyPythonFunction; 
+    
    // vector exporters
    export_function_type_vector<PyExplicitFunction>("ExplicitFunctionVector");
    
@@ -503,6 +359,7 @@ void export_functiontypes(){
    //export_function_type_vector<PySquaredDifferenceFunction>("SquaredDifferenceFunctionVector");
    export_function_type_vector<PyTruncatedSquaredDifferenceFunction>("TruncatedSquaredDifferenceFunctionVector");
    export_function_type_vector<PySparseFunction>("SparseFunctionVector");
+   export_function_type_vector<PyPythonFunction>("PythonFunctionVector");
 
    typedef typename PySparseFunction::ContainerType PySparseFunctionMapType;
    //export std::map for sparsefunction
@@ -725,55 +582,23 @@ void export_functiontypes(){
    )
    ;
    
+   FUNCTION_TYPE_EXPORTER_HELPER(PyPythonFunction,                       "PythonFunction")
+   .def(init<boost::python::object,boost::python::object,const bool>(
+         (arg("function"),arg("shape"),arg("ensureGilState")=true),
+         "Examples: ::\n\n"
+         "   >>> import opengm\n"
+         "   >>> import numpy\n" 
+         "   >>> def labelSumFunction(labels):\n"
+         "   ...    s=0\n"
+         "   ...    for l in labels:\n"
+         "   ...       s+=l\n"
+         "   ...    return s\n"
+         "   >>> f=opengm.PythonFunction(function=labelSumFunction,shape=[2,2])\n"
+         "\n\n"
+      )
+   )
+   ;
 
-
-   FUNCTION_TYPE_EXPORTER_HELPER(PyLPottsFunction,"LPottsFunction")
-    .def("__init__", make_constructor(&pyfunction::lPottsConstructor<PyLPottsFunction> ,default_call_policies(),
-         (
-            boost::python::arg("weights"),
-            boost::python::arg("numberOfLabels"),
-            boost::python::arg("weightIds"),
-            boost::python::arg("features")
-         )
-      ),
-   "todo"
-   );
-
-    FUNCTION_TYPE_EXPORTER_HELPER(PyLUnaryFunction,"LUnaryFunction")
-    .def("__init__", make_constructor(&pyfunction::lUnaryConstructor<PyLUnaryFunction> ,default_call_policies(),
-         (
-            boost::python::arg("weights"),
-            boost::python::arg("numberOfLabels"),
-            boost::python::arg("weightIds"),
-            boost::python::arg("features")
-         )
-      ),
-   "todo"
-    )
-    .def("__init__", make_constructor(&pyfunction::lUnaryConstructorList<PyLUnaryFunction> ,default_call_policies(),
-         (
-            boost::python::arg("weights"),
-            boost::python::arg("numberOfLabels"),
-            boost::python::arg("weightIds"),
-            boost::python::arg("features")
-         )
-      ),
-   "todo"
-    )
-    ;
-
-    FUNCTION_TYPE_EXPORTER_HELPER(PyLSumOfWeightedFunction,"SumOfExpertsFunction")
-    .def("__init__", make_constructor(&pyfunction::weightedSumOfFunctionsConstructor<PyLSumOfWeightedFunction> ,default_call_policies(),
-         (
-            boost::python::arg("shape"),
-            boost::python::arg("weight"),
-            boost::python::arg("weightIds"),
-            boost::python::arg("features")
-         )
-      ),
-   "todo"
-    )
-    ;
 }
 
 template void export_functiontypes<opengm::python::GmValueType,opengm::python::GmIndexType>();
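
The restored PythonFunction exporter wraps an arbitrary Python callable as a factor function; following its docstring, a short usage sketch (assuming a built opengm Python module):

    import opengm
    import numpy

    def labelSumFunction(labels):
        # the factor's value is simply the sum of the labels
        return sum(labels)

    gm = opengm.gm([2, 2])  # two binary variables
    f = opengm.PythonFunction(function=labelSumFunction, shape=[2, 2])
    fid = gm.addFunction(f)
    gm.addFactor(fid, [0, 1])
    print(gm.evaluate([1, 1]))  # expected: 2.0
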
diff --git a/src/interfaces/python/opengm/opengmcore/pyGm.cxx b/src/interfaces/python/opengm/opengmcore/pyGm.cxx
index 1b2c6cf..0fe3cc9 100644
--- a/src/interfaces/python/opengm/opengmcore/pyGm.cxx
+++ b/src/interfaces/python/opengm/opengmcore/pyGm.cxx
@@ -682,8 +682,7 @@ namespace pygm {
          typedef opengm::SquaredDifferenceFunction             <ValueType,IndexType,LabelType> PySquaredDifferenceFunction;
          typedef opengm::TruncatedSquaredDifferenceFunction    <ValueType,IndexType,LabelType> PyTruncatedSquaredDifferenceFunction;
          typedef opengm::SparseFunction                        <ValueType,IndexType,LabelType> PySparseFunction; 
-         typedef opengm::functions::learnable::LPotts          <ValueType,IndexType,LabelType> PyLPottsFunction;
-         typedef opengm::functions::learnable::LUnary          <ValueType,IndexType,LabelType> PyLUnaryFunction;
+         typedef opengm::python::PythonFunction                <ValueType,IndexType,LabelType> PyPythonFunction; 
 
          if(fname==std::string("explicit")){
             return gm. template  reserveFunctions<PyExplicitFunction>(size);
@@ -706,11 +705,8 @@ namespace pygm {
          else if(fname==std::string("sparse")){
             return gm. template  reserveFunctions<PySparseFunction>(size);
          }
-         else if(fname==std::string("lpotts")){
-            return gm. template  reserveFunctions<PyLPottsFunction>(size);
-         }
-         else if(fname==std::string("lunary")){
-            return gm. template  reserveFunctions<PyLUnaryFunction>(size);
+         else if(fname==std::string("python")){
+            return gm. template  reserveFunctions<PyPythonFunction>(size);
          }
          else{
             throw opengm::RuntimeError(fname + std::string(" is an unknown function type name"));
@@ -1461,8 +1457,9 @@ void export_gm() {
    typedef opengm::SquaredDifferenceFunction             <ValueType,IndexType,LabelType> PySquaredDifferenceFunction;
    typedef opengm::TruncatedSquaredDifferenceFunction    <ValueType,IndexType,LabelType> PyTruncatedSquaredDifferenceFunction;
    typedef opengm::SparseFunction                        <ValueType,IndexType,LabelType> PySparseFunction; 
-   typedef opengm::functions::learnable::LPotts          <ValueType,IndexType,LabelType> PyLPottsFunction;
-   typedef opengm::functions::learnable::LUnary          <ValueType,IndexType,LabelType> PyLUnaryFunction;
+   typedef opengm::python::PythonFunction                <ValueType,IndexType,LabelType> PyPythonFunction; 
+
+
 
    typedef typename PyGm::FunctionIdentifier PyFid;
    typedef typename PyGm::FactorType PyFactor;
@@ -1840,9 +1837,10 @@ void export_gm() {
    //.def("_addFunctions_vector",&pygm::addFunctionsGenericVectorPy<PyGm,PySquaredDifferenceFunction>,return_value_policy<manage_new_object>(),args("functions"),"todo")
    .def("_addFunctions_vector",&pygm::addFunctionsGenericVectorPy<PyGm,PyTruncatedSquaredDifferenceFunction>,return_value_policy<manage_new_object>(),args("functions"),"todo")
    .def("_addFunctions_vector",&pygm::addFunctionsGenericVectorPy<PyGm,PySparseFunction>,return_value_policy<manage_new_object>(),args("functions"),"todo")
+   .def("_addFunctions_vector",&pygm::addFunctionsGenericVectorPy<PyGm,PyPythonFunction>,return_value_policy<manage_new_object>(),args("functions"),"todo")
 
-   .def("_addFunction",&pygm::addFunctionGenericPy<PyGm,PyLUnaryFunction>,args("function"))
-   .def("_addFunction",&pygm::addFunctionGenericPy<PyGm,PyLPottsFunction>,args("function"))
+
+   .def("_addFunction",&pygm::addFunctionGenericPy<PyGm,PyPottsFunction>,args("function"))
    .def("_addFunction",&pygm::addFunctionGenericPy<PyGm,PyPottsFunction>,args("function"))
    .def("_addFunction",&pygm::addFunctionGenericPy<PyGm,PyPottsNFunction>,args("function"))
    .def("_addFunction",&pygm::addFunctionGenericPy<PyGm,PyPottsGFunction>,args("function"))
@@ -1851,9 +1849,10 @@ void export_gm() {
    //.def("_addFunction",&pygm::addFunctionGenericPy<PyGm,PySquaredDifferenceFunction>,args("function"))
    .def("_addFunction",&pygm::addFunctionGenericPy<PyGm,PyTruncatedSquaredDifferenceFunction>,args("function"))
    .def("_addFunction",&pygm::addFunctionGenericPy<PyGm,PySparseFunction>,args("function"))
-   .def("_addFunction", &pygm::addFunctionNpPy<PyGm>,args("function"))
+   .def("_addFunction",&pygm::addFunctionGenericPy<PyGm,PyPythonFunction>,args("function"))
+   .def("_addFunction", &pygm::addFunctionNpPy<PyGm>,args("function"))
    .def("_addFactor", &pygm::addFactor_Any<PyGm,int>, (arg("fid"),arg("variableIndices"),arg("finalize")))
-   .def("_addFactor", &pygm::addFactor_Numpy<PyGm>, (arg("fid"),arg("variableIndices"),arg("finalize")))
+   .def("_addFactor", &pygm::addFactor_Numpy<PyGm>, (arg("fid"),arg("variableIndices"),arg("finalize")))
    .def("_addFactor", &pygm::addFactor_Vector<PyGm>, (arg("fid"),arg("variableIndices"),arg("finalize")))
    .def("_addUnaryFactors_vector_numpy", &pygm::addUnaryFactors_Vector_Numpy<PyGm>, (arg("fid"),arg("variableIndices"),arg("finalize")))
    .def("_addFactors_vector_numpy", &pygm::addFactors_Vector_Numpy<PyGm>, (arg("fid"),arg("variableIndices"),arg("finalize")))
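
On the Python side, the string dispatched above is the second argument of gm.reserveFunctions; a sketch, assuming the wrapper forwards the type name unchanged:

    import opengm

    gm = opengm.gm([2] * 10)
    # pre-allocate space for 100 functions of the 'python' type;
    # an unknown name raises a RuntimeError, as the C++ branch above shows
    gm.reserveFunctions(100, 'python')
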
diff --git a/src/interfaces/python/test.py b/src/interfaces/python/test.py
index 47542c7..b0db546 100644
--- a/src/interfaces/python/test.py
+++ b/src/interfaces/python/test.py
@@ -3,8 +3,6 @@ import opengm
 import os
 import sys
 import random
-import opengm.learning
-
 class TestAllExampes:
     def test_run(self):
         for r, d, f in os.walk("examples"):
@@ -1049,7 +1047,6 @@ class Test_Inference():
                                gms=[self.mcGm],
                                semiRings=self.minSum,testPythonVisitor=False)
 
-    """
     def test_lpcplex(self):
         if opengm.configuration.withCplex:
             solverClass = opengm.inference.LpCplex
@@ -1060,7 +1057,7 @@ class Test_Inference():
                                gms=[self.gridGm, self.chainGm, self.gridGm3,
                                     self.chainGm3],
                                semiRings=self.minSum,testPythonVisitor=False,testLpInterface=True)
-    
+    """
     def test_lpcplex2(self):
         if False and opengm.configuration.withCplex:
             solverClass = opengm.inference.LpCplex2
@@ -1179,116 +1176,6 @@ class Test_Inference():
                                     self.chainGm3],
                                semiRings=self.minSum,testPythonVisitor=False)
 
-class Test_Learning:
-    def __init__(self):
-        self.__nWeights = 12
-        self.__shape = [10,10]
-
-    # utility functions
-    def __makeGt(self, shape):
-        gt=numpy.ones(shape,dtype='uint8')
-        gt[0:shape[0]//2,:] = 0
-        return gt
-
-    def __create_dataset(self, functionType, numModels=1):
-        numWeights = 4
-        dataset = opengm.learning.createDataset(numWeights=numWeights)
-        weights = dataset.getWeights()
-
-        gt = self.__makeGt(self.__shape)
-        numVars = self.__shape[0] * self.__shape[1]
-        numLabels = 2
-
-        uWeightIds = numpy.array([[0, 1]], dtype='uint64')
-        bWeightIds = numpy.array([2, 3], dtype='uint64')
-
-        for m in range(numModels):
-            gm = opengm.gm(numpy.ones(numVars) * numLabels)
-
-            # create noisy data
-            random  = (numpy.random.rand(*gt.shape)-0.5)*0.3
-            noisyGt = random + gt
-
-            # add unaries
-            for x in range(self.__shape[0]):
-                for y in range(self.__shape[1]):
-                    # use noised GT input, and a constant feature
-                    uFeat = numpy.array([[noisyGt[x,y], 1]], dtype='float64')
-
-                    lu = opengm.learning.lUnaryFunction(weights=weights,numberOfLabels=numLabels, 
-                                                 features=uFeat, weightIds=uWeightIds)
-                    fid = gm.addFunction(lu)
-                    facIndex = gm.addFactor(fid, y+x*self.__shape[1])
-
-            # add pairwise
-            for x in range(self.__shape[0]):
-                for y in range(self.__shape[1]):
-
-                    if x+1 < self.__shape[0]:
-                        gradientMag = (noisyGt[x+1,y] - noisyGt[x,y])**2
-                        bFeat = numpy.array([gradientMag, 1], dtype='float64')
-                        pf = opengm.LPottsFunction(weights=weights,numberOfLabels=numLabels, features=bFeat, weightIds=bWeightIds)
-                        fid= gm.addFunction(pf)
-                        gm.addFactor(fid, [y+x*self.__shape[1], y+(x+1)*self.__shape[1]])
-                    if y+1 < self.__shape[1]:
-                        gradientMag = (noisyGt[x,y+1] - noisyGt[x,y])**2
-                        bFeat = numpy.array([gradientMag, 1], dtype='float64')
-                        pf = opengm.LPottsFunction(weights=weights,numberOfLabels=numLabels, features=bFeat, weightIds=bWeightIds)
-                        fid= gm.addFunction(pf)
-                        gm.addFactor(fid, [y+x*self.__shape[1], (y+1)+x*self.__shape[1]])
-
-            # store GM and its GT
-            dataset.pushBackInstance(gm, gt.reshape([-1]).astype(opengm.label_type))
-
-        return dataset
-
-    def __create_weights(self, numWeights):
-        weightVals = numpy.ones(numWeights)
-        weights = opengm.learning.Weights(weightVals)
-        return weights
-
-    # def __create_loss(self):
-
-    def __generic_learner_test(self, learner):
-        if opengm.configuration.withTrws:
-            learner.learn(infCls=opengm.inference.TrwsExternal, parameter=opengm.InfParam())
-        elif opengm.configuration.withCplex:
-            learner.learn(infCls=opengm.inference.LpCplex, parameter=opengm.InfParam())
-        else:
-            learner.learn(infCls=opengm.inference.Icm, parameter=opengm.InfParam())
-
-    # tests
-    def test_weights(self):
-        weights = self.__create_weights(self.__nWeights)
-        assert(len(weights) == self.__nWeights)
-
-        value = 15
-        weights[3] = value
-        assert(weights[3] == value)
-
-    def test_dataset(self):
-        ds = self.__create_dataset('potts', 1)
-        assert(ds.getNumberOfWeights() == 4)
-        assert(ds.getNumberOfModels() == 1)
-
-    def test_dataset_serialization(self):
-        import tempfile
-        import shutil
-        ds = self.__create_dataset(self.__nWeights)
-        # TODO: create temp directory
-        temp_path = tempfile.mkdtemp()
-        prefix = 'test'
-        ds.save(temp_path, prefix)
-
-        loaded_ds = opengm.learning.DatasetWithFlexibleLoss(0)
-        loaded_ds.load(temp_path, prefix)
-        shutil.rmtree(temp_path)
-
-        assert(ds.getNumberOfWeights() == loaded_ds.getNumberOfWeights())
-        assert(ds.getNumberOfModels() == loaded_ds.getNumberOfModels())
-        assert(ds.getModel(0).numberOfVariables == loaded_ds.getModel(0).numberOfVariables)
-        assert(ds.getModel(0).numberOfFactors == loaded_ds.getModel(0).numberOfFactors)
-
 
 if __name__ == "__main__":
     t = Test_Inference()
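
With Test_Learning gone, the suite exercises inference only. A minimal standalone check in the same spirit, assuming a built opengm module:

    import opengm
    import numpy

    # two-variable chain: explicit unaries plus a Potts pairwise term
    gm = opengm.gm([2, 2])
    gm.addFactor(gm.addFunction(numpy.array([0.0, 1.0])), [0])
    gm.addFactor(gm.addFunction(numpy.array([1.0, 0.0])), [1])
    gm.addFactor(gm.addFunction(opengm.PottsFunction([2, 2], 0.0, 0.5)), [0, 1])

    inf = opengm.inference.Icm(gm)
    inf.infer()
    arg = inf.arg()
    assert gm.evaluate(arg) <= gm.evaluate([0, 0])
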
diff --git a/src/tutorials/c++/basics/doMinSumInference.cxx b/src/tutorials/c++/basics/doMinSumInference.cxx
index 1aa00a0..bc8a11e 100644
--- a/src/tutorials/c++/basics/doMinSumInference.cxx
+++ b/src/tutorials/c++/basics/doMinSumInference.cxx
@@ -123,7 +123,7 @@ int main(int argc, char** argv) {
    // Infer with TRWSI 
    std::cout << "Start TRWSi inference ... " <<std::endl;
    typedef opengm::TRWSi<Model,opengm::Minimizer> TRWSi;
-   TRWSi::Parameter para(size_t(100));
+   TRWSi::Parameter para(100);
    para.precision_=1e-12;
    TRWSi trws(gm,para);
    trws.infer();
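
The constructor argument changed here is TRWSi's maximal iteration count. Roughly the same setup through the Python bindings, assuming TrwsExternal is built in and that its InfParam exposes a steps field:

    import opengm

    gm = opengm.gm([2, 2])
    gm.addFactor(gm.addFunction(opengm.PottsFunction([2, 2], 0.0, 0.5)), [0, 1])

    if opengm.configuration.withTrws:
        # 'steps' is assumed to map onto the iteration bound set above
        inf = opengm.inference.TrwsExternal(gm, parameter=opengm.InfParam(steps=100))
        inf.infer()
        print(inf.arg())
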
diff --git a/src/tutorials/c++/basics/doSumProdInference.cxx b/src/tutorials/c++/basics/doSumProdInference.cxx
index e4a792b..2cad7da 100644
--- a/src/tutorials/c++/basics/doSumProdInference.cxx
+++ b/src/tutorials/c++/basics/doSumProdInference.cxx
@@ -126,7 +126,7 @@ void inferBP(const Model& gm, bool normalization = true){
    typedef opengm::BeliefPropagationUpdateRules<Model, opengm::Integrator> UpdateRules;
    typedef opengm::MessagePassing<Model, opengm::Integrator, UpdateRules, opengm::MaxDistance>  LBP; 
   
-   LBP::Parameter parameter(size_t(100)); //maximal number of iterations = 100
+   LBP::Parameter parameter(100); //maximal number of iterations = 100
    parameter.useNormalization_ = normalization;
    LBP lbp(gm, parameter); 
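
useNormalization_ rescales each message so that products of many small terms do not underflow; the resulting beliefs are unaffected because every entry of a message is divided by the same positive constant. The idea in plain numpy:

    import numpy

    def normalize(message):
        # rescaling changes none of the resulting beliefs, since each
        # entry is divided by the same positive constant
        return message / message.sum()

    m = numpy.array([1e-8, 3e-8])
    print(normalize(m))  # [0.25 0.75]
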
   
diff --git a/src/unittest/CMakeLists.txt b/src/unittest/CMakeLists.txt
index bbf3411..8110b7a 100644
--- a/src/unittest/CMakeLists.txt
+++ b/src/unittest/CMakeLists.txt
@@ -29,10 +29,7 @@ if(BUILD_TESTING)
    add_test(test-fast-sequence ${CMAKE_CURRENT_BINARY_DIR}/test-fast-sequence)
 
    add_executable(test-functions test_functions.cxx ${headers})
-   add_test(test-functions ${CMAKE_CURRENT_BINARY_DIR}/test-functions) 
-
-   add_executable(test-learnable-functions test_learnable_functions.cxx ${headers})
-   add_test(test-learnable-functions ${CMAKE_CURRENT_BINARY_DIR}/test-learnable-functions)
+   add_test(test-functions ${CMAKE_CURRENT_BINARY_DIR}/test-functions)
 
    add_executable(test-factor test_factor.cxx ${headers})
    add_test(test-factor ${CMAKE_CURRENT_BINARY_DIR}/test-factor)
@@ -122,5 +119,4 @@ if(BUILD_TESTING)
    add_test(test-canonicalview ${CMAKE_CURRENT_BINARY_DIR}/test-canonicalview)
 
    add_subdirectory(inference)
-   add_subdirectory(learning)
 endif()
diff --git a/src/unittest/inference/test_graphcut.cxx b/src/unittest/inference/test_graphcut.cxx
index 2b680e6..55a62e0 100644
--- a/src/unittest/inference/test_graphcut.cxx
+++ b/src/unittest/inference/test_graphcut.cxx
@@ -68,7 +68,7 @@ int main() {
    {
       typedef opengm::external::MinSTCutIBFS<int, int> MinStCutType;
       typedef opengm::GraphCut<GraphicalModelType, opengm::Minimizer, MinStCutType> MinGraphCut;
-      MinGraphCut::Parameter para(10000.0f);
+      MinGraphCut::Parameter para(10000);
       minTester.test<MinGraphCut>(para);
    }
 #endif
@@ -103,7 +103,7 @@ int main() {
    {
       typedef opengm::MinSTCutBoost<size_t, long, opengm::PUSH_RELABEL> MinStCutType;
       typedef opengm::GraphCut<GraphicalModelType, opengm::Minimizer, MinStCutType> MinGraphCut;
-      MinGraphCut::Parameter para(1000000.f);
+      MinGraphCut::Parameter para(1000000);
       minTester.test<MinGraphCut>(para);
    }
    std::cout << "  * Test Min-Sum with BOOST-Edmonds-Karp" << std::endl;
@@ -128,7 +128,7 @@ int main() {
    {
       typedef opengm::external::MinSTCutIBFS<int, int> MinStCutType;
       typedef opengm::GraphCut<GraphicalModelType, opengm::Maximizer, MinStCutType> MaxGraphCut;
-      MaxGraphCut::Parameter para(10000.0f);
+      MaxGraphCut::Parameter para(10000);
       maxTester.test<MaxGraphCut>(para);
 
    }
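
The value passed to these Parameter constructors is GraphCut's scale, applied before real-valued energies are rounded to the integer capacities that IBFS and push-relabel operate on, so dropping the float suffixes changes nothing. The effect of the scale, sketched in plain Python under that assumption:

    # a larger scale preserves more precision when float energies are
    # mapped to integer capacities (the int/long flow types above)
    scale = 10000
    energies = [0.12345, 0.12349]
    print([int(round(e * scale)) for e in energies])  # [1234, 1235]
    print([int(round(e * 1000)) for e in energies])   # [123, 123], merged
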
diff --git a/src/unittest/inference/test_lazyflipper.cxx b/src/unittest/inference/test_lazyflipper.cxx
index cee5372..6645c40 100644
--- a/src/unittest/inference/test_lazyflipper.cxx
+++ b/src/unittest/inference/test_lazyflipper.cxx
@@ -83,7 +83,7 @@ void additionalTest() {
    }
 
    {
-      LazyFlipper::Parameter parameter(size_t(6));
+      LazyFlipper::Parameter parameter(6);
       LazyFlipper lazyFlipper(model, parameter);
       lazyFlipper.infer();
 
diff --git a/src/unittest/inference/test_messagepassing.cxx b/src/unittest/inference/test_messagepassing.cxx
index 9bed1b0..a065942 100644
--- a/src/unittest/inference/test_messagepassing.cxx
+++ b/src/unittest/inference/test_messagepassing.cxx
@@ -167,7 +167,7 @@ int main() {
          typedef opengm::GraphicalModel<double, opengm::Adder> GraphicalModelType;
          typedef opengm::BeliefPropagationUpdateRules<GraphicalModelType,opengm::Minimizer> UpdateRulesType;
          typedef opengm::MessagePassing<GraphicalModelType, opengm::Minimizer,UpdateRulesType, opengm::MaxDistance>            BP;
-         BP::Parameter para(size_t(10));
+         BP::Parameter para(10);
          sumTester.test<BP>(para); 
          std::cout << " ... parallel ... ";
          para.isAcyclic_=opengm::Tribool::False;
@@ -179,7 +179,7 @@ int main() {
          typedef opengm::GraphicalModel<double, opengm::Adder> GraphicalModelType;
          typedef opengm::BeliefPropagationUpdateRules<GraphicalModelType,opengm::Minimizer> UpdateRulesType;
          typedef opengm::MessagePassing<GraphicalModelType, opengm::Minimizer,UpdateRulesType, opengm::MaxDistance>            BP;
-         BP::Parameter para(size_t(100));
+         BP::Parameter para(100);
          para.isAcyclic_ = false;
          sumTester.test<BP>(para);
          std::cout << " OK!"<<std::endl;
@@ -198,7 +198,7 @@ int main() {
          typedef opengm::GraphicalModel<double,opengm::Adder> GraphicalModelType;
          typedef opengm::BeliefPropagationUpdateRules<GraphicalModelType,opengm::Maximizer> UpdateRulesType;
          typedef opengm::MessagePassing<GraphicalModelType, opengm::Maximizer,UpdateRulesType, opengm::MaxDistance>            BP;
-         BP::Parameter para(size_t(10));
+         BP::Parameter para(10);
          sumTester.test<BP>(para);
          std::cout << " OK!"<<std::endl;
       }
@@ -207,7 +207,7 @@ int main() {
          typedef opengm::GraphicalModel<double,opengm::Multiplier  > GraphicalModelType;
          typedef opengm::BeliefPropagationUpdateRules<GraphicalModelType,opengm::Maximizer> UpdateRulesType;
          typedef opengm::MessagePassing<GraphicalModelType, opengm::Maximizer,UpdateRulesType, opengm::MaxDistance>            BP;
-         BP::Parameter para(size_t(10));
+         BP::Parameter para(10);
          prodTester.test<BP>(para);
          std::cout << " OK!"<<std::endl;
        }
@@ -290,7 +290,7 @@ int main() {
       typedef opengm::GraphicalModel<double,opengm::Adder > GraphicalModelType;
       typedef opengm::TrbpUpdateRules<GraphicalModelType,opengm::Minimizer> UpdateRulesType;
       typedef opengm::MessagePassing<GraphicalModelType, opengm::Minimizer,UpdateRulesType, opengm::MaxDistance>            BP;
-      BP::Parameter para(size_t(100));
+      BP::Parameter para(10);
       sumTester.test<BP>(para);
       std::cout << " ... parallel ... ";
       para.isAcyclic_=opengm::Tribool::False;
@@ -323,7 +323,7 @@ int main() {
       typedef opengm::GraphicalModel<double,opengm::Adder>   GraphicalModelType;
       typedef opengm::TrbpUpdateRules<GraphicalModelType,opengm::Maximizer> UpdateRulesType;
       typedef opengm::MessagePassing<GraphicalModelType, opengm::Maximizer, UpdateRulesType,opengm::MaxDistance>            BP;
-      BP::Parameter para(size_t(10));
+      BP::Parameter para(10);
       sumTester.test<BP>(para);
       std::cout << " OK!"<<std::endl;
     }
@@ -332,7 +332,7 @@ int main() {
       typedef opengm::GraphicalModel<double,opengm::Multiplier  > GraphicalModelType;
       typedef opengm::TrbpUpdateRules<GraphicalModelType,opengm::Maximizer> UpdateRulesType;
       typedef opengm::MessagePassing<GraphicalModelType, opengm::Maximizer, UpdateRulesType,opengm::MaxDistance>            BP;
-      BP::Parameter para(size_t(10));
+      BP::Parameter para(10);
       prodTester.test<BP>(para);
       std::cout << " OK!"<<std::endl;
     }
@@ -341,7 +341,7 @@ int main() {
       typedef opengm::GraphicalModel<double,opengm::Multiplier > GraphicalModelType;
       typedef opengm::TrbpUpdateRules<GraphicalModelType,opengm::Maximizer> UpdateRulesType;
       typedef opengm::MessagePassing<GraphicalModelType, opengm::Maximizer, UpdateRulesType, opengm::MaxDistance>            BP;
-      BP::Parameter para(size_t(10));
+      BP::Parameter para(10);
       prodTester.test<BP>(para);
       std::cout << " OK!"<<std::endl;
     }
@@ -359,7 +359,7 @@ int main() {
       typedef opengm::GraphicalModel<double,opengm::Adder > GraphicalModelType;
       typedef opengm::TrbpUpdateRules<GraphicalModelType,opengm::Integrator> UpdateRulesType;
       typedef opengm::MessagePassing<GraphicalModelType, opengm::Integrator,UpdateRulesType, opengm::MaxDistance>            BP;
-      BP::Parameter para(size_t(10));
+      BP::Parameter para(10);
       sumTester.test<BP>(para);
       std::cout << " OK!"<<std::endl;
     }
@@ -368,7 +368,7 @@ int main() {
       typedef opengm::GraphicalModel<double,opengm::Multiplier > GraphicalModelType;
       typedef opengm::TrbpUpdateRules<GraphicalModelType,opengm::Integrator> UpdateRulesType;
       typedef opengm::MessagePassing<GraphicalModelType, opengm::Integrator,UpdateRulesType, opengm::MaxDistance>            BP;
-      BP::Parameter para(size_t(10));
+      BP::Parameter para(10);
       prodTester.test<BP>(para);
       std::cout << " OK!"<<std::endl;
     }
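
The hunks above sweep one MessagePassing implementation across semirings: the operator (Adder/Multiplier) is fixed by the model, the accumulator (Minimizer/Maximizer/Integrator) by the inference object. The same pairing through the Python bindings, in a sketch assuming both accumulator strings are exported:

    import opengm
    import numpy

    # min-sum: additive energies, minimizing accumulator
    gm_add = opengm.gm([2, 2], operator='adder')
    gm_add.addFactor(gm_add.addFunction(numpy.array([0.0, 1.0])), [0])
    gm_add.addFactor(gm_add.addFunction(opengm.PottsFunction([2, 2], 0.0, 0.5)), [0, 1])
    bp = opengm.inference.BeliefPropagation(gm_add, accumulator='minimizer',
                                            parameter=opengm.InfParam(steps=10))
    bp.infer()
    print('min-sum :', bp.arg())

    # max-prod: multiplicative potentials, maximizing accumulator
    gm_mul = opengm.gm([2, 2], operator='multiplier')
    gm_mul.addFactor(gm_mul.addFunction(numpy.array([0.6, 0.4])), [0])
    gm_mul.addFactor(gm_mul.addFunction(numpy.array([[0.7, 0.3],
                                                     [0.3, 0.7]])), [0, 1])
    bp = opengm.inference.BeliefPropagation(gm_mul, accumulator='maximizer',
                                            parameter=opengm.InfParam(steps=10))
    bp.infer()
    print('max-prod:', bp.arg())
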
diff --git a/src/unittest/learning/CMakeLists.txt b/src/unittest/learning/CMakeLists.txt
deleted file mode 100644
index 9c46812..0000000
--- a/src/unittest/learning/CMakeLists.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-
-add_definitions(-DOPENGM_DEBUG)
-
-if(BUILD_TESTING)
-
-    add_executable(test-gridsearch-learner test_gridsearch_learner.cxx ${headers})
-    add_test(test-gridsearch-learner ${CMAKE_CURRENT_BINARY_DIR}/test-gridsearch-learner)
-
-    add_executable(test-maximum-likelihood-learner test_maximum_likelihood_learner.cxx ${headers})
-    add_test(test-maximum-likelihood-learner ${CMAKE_CURRENT_BINARY_DIR}/test-maximum-likelihood-learner)
-
-    if(WITH_HDF5)
-        add_executable(test-dataset-io test_dataset_io.cxx ${headers})
-        target_link_libraries(test-dataset-io ${HDF5_LIBRARIES})
-        add_test(test-dataset-io ${CMAKE_CURRENT_BINARY_DIR}/test-dataset-io)
-
-        add_executable(test-dataset test_dataset.cxx ${headers})
-        target_link_libraries(test-dataset ${HDF5_LIBRARIES})
-        add_test(test-dataset ${CMAKE_CURRENT_BINARY_DIR}/test-dataset)
-    endif()
-
-    if(WITH_TRWS)
-        if(WITH_GUROBI)
-            ADD_EXECUTABLE(test-learning test_learning.cxx ${headers})
-            target_link_libraries(test-learning ${GUROBI_CXX_LIBRARY} ${GUROBI_LIBRARY})
-            target_link_libraries(test-learning external-library-trws)
-            add_test(test-learning ${CMAKE_CURRENT_BINARY_DIR}/test-learning) 
-        else()
-            if(WITH_CPLEX)
-                ADD_EXECUTABLE(test-learning test_learning.cxx ${headers})
-                target_link_libraries(test-learning ${CPLEX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
-                target_link_libraries(test-learning external-library-trws)
-                add_test(test-learning ${CMAKE_CURRENT_BINARY_DIR}/test-learning)
-            endif()
-        endif()
-    endif()
-
-
-    if(WITH_CPLEX)
-        ADD_EXECUTABLE(test-subgradient-ssvm test_subgradient_ssvm.cxx ${headers})
-        if(OPENMP_FOUND)
-            SET_TARGET_PROPERTIES(test-subgradient-ssvm PROPERTIES COMPILE_FLAGS "${OpenMP_CXX_FLAGS}")
-            SET_TARGET_PROPERTIES(test-subgradient-ssvm PROPERTIES LINK_FLAGS "${OpenMP_CXX_FLAGS}")
-        endif()
-        target_link_libraries(test-subgradient-ssvm ${CPLEX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
-        add_test(test-subgradient-ssvm ${CMAKE_CURRENT_BINARY_DIR}/test-subgradient-ssvm)
-    endif()
-
-  add_executable(test-generalized-hammingloss test_generalized_hammingloss.cxx ${headers})
-  add_test(test-generalized-hammingloss ${CMAKE_CURRENT_BINARY_DIR}/test-generalized-hammingloss)
-endif()
diff --git a/src/unittest/learning/test_dataset.cxx b/src/unittest/learning/test_dataset.cxx
deleted file mode 100644
index b84e2ec..0000000
--- a/src/unittest/learning/test_dataset.cxx
+++ /dev/null
@@ -1,150 +0,0 @@
-#include <vector>
-
-#include <opengm/functions/explicit_function.hxx>
-#include <opengm/unittests/test.hxx>
-#include <opengm/graphicalmodel/graphicalmodel.hxx>
-#include <opengm/operations/adder.hxx>
-#include <opengm/operations/minimizer.hxx>
-#include <opengm/inference/icm.hxx>
-#include <opengm/utilities/metaprogramming.hxx>
-
-#include <opengm/functions/learnable/lpotts.hxx>
-#include <opengm/functions/learnable/lweightedsum_of_functions.hxx>
-//#include <opengm/learning/dataset/testdataset.hxx>
-//#include <opengm/learning/dataset/testdataset2.hxx>
-#include <opengm/learning/dataset/dataset_io.hxx>
-#include <opengm/learning/dataset/dataset.hxx>
-#include <opengm/learning/dataset/testdatasets.hxx>
-#include <opengm/learning/loss/noloss.hxx>
-#include <opengm/learning/loss/hammingloss.hxx>
-
-
-//*************************************
-typedef double ValueType;
-typedef size_t IndexType;
-typedef size_t LabelType; 
-typedef opengm::meta::TypeListGenerator<opengm::ExplicitFunction<ValueType,IndexType,LabelType>, opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType>, opengm::functions::learnable::LWeightedSumOfFunctions<ValueType,IndexType,LabelType> >::type FunctionListType;
-typedef opengm::GraphicalModel<ValueType,opengm::Adder, FunctionListType, opengm::DiscreteSpace<IndexType,LabelType> > GM; 
-//typedef opengm::datasets::TestDataset<GM>  DS1;
-//typedef opengm::datasets::Dataset<GM>      DS;
-
-typedef opengm::learning::HammingLoss     LOSS;
-//typedef opengm::learning::NoLoss                 LOSS;
-typedef opengm::datasets::TestDataset1<GM,LOSS>  DS0;
-typedef opengm::datasets::TestDataset1<GM,LOSS>  DS1;
-typedef opengm::datasets::TestDataset2<GM,LOSS>  DS2;
-typedef opengm::datasets::Dataset<GM,LOSS>       DS;
-
-//*************************************
-
-template<class DatasetType>
-struct DatasetTest {
-
-   DatasetType& dataset_;
-
-   DatasetTest(DatasetType& data): dataset_(data) {}
-
-   void testInitialization() {
-      std::cout << "Initialize Model:" << std::endl;
-      // create a new dataset
-      DatasetType dataset;
-   }
-
-   void callModelFunctions(){
-
-         std::cout << "calling Model functions:" << std::endl;
-         std::cout << "\tlocking all available Models" << std::endl;
-
-         for(size_t i=0; i<dataset_.getNumberOfModels(); i++)
-         {
-            dataset_.lockModel(i);
-            dataset_.unlockModel(i);
-         }
-
-         std::cout << "\tgetModel with and without loss" << std::endl;
-         for(size_t i=0; i<dataset_.getNumberOfModels(); i++)
-         {
-            OPENGM_TEST(dataset_.getModel(i).numberOfVariables() == dataset_.getModelWithLoss(i).numberOfVariables());
-            OPENGM_TEST(dataset_.getModel(i).numberOfFactors() <=  dataset_.getModelWithLoss(i).numberOfFactors());
-         }
-
-         std::cout << "\tgetGT" << std::endl;
-         for(size_t i=0; i<dataset_.getNumberOfModels(); i++)
-         {
-            std::cout << dataset_.getGT(i).size() << std::endl;
-         }
-
-      }
-
-   void getInfo(){
-         std::cout << "Info of data size:" << std::endl;
-         std::cout << "\tnumberOfWeights\t" << dataset_.getNumberOfWeights() << std::endl;
-         std::cout << "\tnumberOfModels\t" << dataset_.getNumberOfModels() << std::endl;
-
-         opengm::learning::Weights<ValueType> weights = dataset_.getWeights();
-         std::cout << "Beginning of weight vector: ";
-         for(size_t i=0; i<std::min(dataset_.getNumberOfWeights(),size_t(10)); i++)
-         {
-            std::cout << dataset_.getWeights()[i] << " ";
-         }
-         std::cout << std::endl;
-   }
-
-   void run() {
-      this->testInitialization();
-      this->getInfo();
-      this->callModelFunctions();
-   }
-};
-
-
-int main() {
-   std::cout << " Includes are fine :-) " << std::endl; 
-  
-   {
-      // initialize your data here
-      // if necessary, load it from a file
-      DS data;
-  
-      std::cout << "Start test DS" <<std::endl;
-      //run tests on dataset
-      DatasetTest<DS >t(data);
-      t.run();
-   } 
-
-   {
-      // initialize your data here
-      // if necessary, load it from a file
-      DS0 data;
-
-      std::cout << "Start test DS0" <<std::endl;
-      //run tests on dataset
-      DatasetTest<DS0 >t(data);
-      t.run();
-
-   }
-   {
-      // initialize your data here
-      // if necessary, load it from a file
-      DS1 data;
-
-      std::cout << "Start test DS1" <<std::endl;
-      //run tests on dataset
-      DatasetTest<DS1 >t(data);
-      t.run();
-
-   }
-   {
-      // initialize your data here
-      // if necessary, load it from a file
-      DS2 data;
-
-      std::cout << "Start test DS2" <<std::endl;
-      //run tests on dataset
-      DatasetTest<DS2 >t(data);
-      t.run();
-
-   }
-
-
-}
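
The Python-side counterpart of this dataset machinery was exercised by the Test_Learning class deleted from test.py above; condensed, the round trip looked like this (meaningful only on the learning-experimental build being reverted):

    import opengm
    import opengm.learning  # removed by this revert

    dataset = opengm.learning.createDataset(numWeights=4)
    weights = dataset.getWeights()
    # ... build models whose learnable functions reference 'weights',
    #     then register each with its ground truth:
    # dataset.pushBackInstance(gm, groundTruth)
    print(dataset.getNumberOfWeights(), dataset.getNumberOfModels())
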
diff --git a/src/unittest/learning/test_dataset_io.cxx b/src/unittest/learning/test_dataset_io.cxx
deleted file mode 100644
index 65d98d4..0000000
--- a/src/unittest/learning/test_dataset_io.cxx
+++ /dev/null
@@ -1,101 +0,0 @@
-#include <vector>
-
-#include <opengm/functions/explicit_function.hxx>
-#include <opengm/unittests/test.hxx>
-#include <opengm/graphicalmodel/graphicalmodel.hxx>
-#include <opengm/operations/adder.hxx>
-#include <opengm/operations/minimizer.hxx>
-#include <opengm/inference/icm.hxx>
-#include <opengm/utilities/metaprogramming.hxx>
-
-#include <opengm/functions/learnable/lpotts.hxx>
-#include <opengm/functions/learnable/lweightedsum_of_functions.hxx>
-//#include <opengm/learning/dataset/testdataset.hxx>
-//#include <opengm/learning/dataset/testdataset2.hxx>
-#include <opengm/learning/dataset/dataset_io.hxx>
-#include <opengm/learning/dataset/dataset.hxx>
-#include <opengm/learning/dataset/testdatasets.hxx>
-#include <opengm/learning/loss/noloss.hxx>
-#include <opengm/learning/loss/hammingloss.hxx>
-#include <opengm/learning/loss/generalized-hammingloss.hxx>
-
-
-//*************************************
-typedef double ValueType;
-typedef size_t IndexType;
-typedef size_t LabelType; 
-typedef opengm::meta::TypeListGenerator<opengm::ExplicitFunction<ValueType,IndexType,LabelType>, opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType>, opengm::functions::learnable::LWeightedSumOfFunctions<ValueType,IndexType,LabelType> >::type FunctionListType;
-typedef opengm::GraphicalModel<ValueType,opengm::Adder, FunctionListType, opengm::DiscreteSpace<IndexType,LabelType> > GM; 
-typedef opengm::learning::NoLoss                 LOSS1;
-typedef opengm::learning::HammingLoss            LOSS2;
-typedef opengm::learning::GeneralizedHammingLoss LOSS3;
-typedef opengm::datasets::TestDataset1<GM,LOSS1>  DS11;
-typedef opengm::datasets::TestDataset2<GM,LOSS1>  DS21;
-typedef opengm::datasets::TestDataset1<GM,LOSS2>  DS12;
-typedef opengm::datasets::TestDataset2<GM,LOSS2>  DS22;
-typedef opengm::datasets::TestDataset1<GM,LOSS3>  DS13;
-typedef opengm::datasets::Dataset<GM,LOSS1>       DS1;
-typedef opengm::datasets::Dataset<GM,LOSS2>       DS2;
-typedef opengm::datasets::Dataset<GM,LOSS3>       DS3;
-
-//*************************************
-
-
-int main() {
-   std::cout << " Includes are fine :-) " << std::endl; 
-  
-   {
-      DS11 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::datasets::DatasetSerialization::save(dataset,"./","dataset11_");
-      std::cout <<"done!" <<std::endl;
-   }
-   {
-      DS12 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::datasets::DatasetSerialization::save(dataset,"./","dataset12_");
-      std::cout <<"done!" <<std::endl;
-   }
-   {
-      DS21 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::datasets::DatasetSerialization::save(dataset,"./","dataset21_");
-      std::cout <<"done!" <<std::endl;
-   }
-   {
-      DS22 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::datasets::DatasetSerialization::save(dataset,"./","dataset22_");
-      std::cout <<"done!" <<std::endl;
-   }
-   {
-      DS13 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::datasets::DatasetSerialization::save(dataset,"./","dataset13_");
-      std::cout <<"done!" <<std::endl;
-   }
-
-   #ifndef CI
-   {
-      DS1 ds;
-      opengm::datasets::DatasetSerialization::loadAll("./","dataset11_",ds);
-   }
-   {
-      DS1 ds;
-      opengm::datasets::DatasetSerialization::loadAll("./","dataset21_",ds);
-   }
-   {
-      DS2 ds;
-      opengm::datasets::DatasetSerialization::loadAll("./","dataset12_",ds);
-   }
-   {
-      DS2 ds;
-      opengm::datasets::DatasetSerialization::loadAll("./","dataset22_",ds);
-   }
-   {
-      DS3 ds;
-      opengm::datasets::DatasetSerialization::loadAll("./","dataset13_",ds);
-   }
-   #endif
-   std::cout << "test successful." << std::endl;
-}
diff --git a/src/unittest/learning/test_generalized_hammingloss.cxx b/src/unittest/learning/test_generalized_hammingloss.cxx
deleted file mode 100644
index 743a4e3..0000000
--- a/src/unittest/learning/test_generalized_hammingloss.cxx
+++ /dev/null
@@ -1,65 +0,0 @@
-#include <vector>
-#include <iostream>
-
-#include <opengm/learning/loss/generalized-hammingloss.hxx>
-#include <opengm/graphicalmodel/graphicalmodel.hxx>
-#include <opengm/graphicalmodel/graphicalmodel_factor.hxx>
-
-//*************************************
-typedef double ValueType;
-typedef size_t IndexType;
-typedef size_t LabelType;
-typedef opengm::meta::TypeListGenerator<opengm::ExplicitFunction<ValueType,IndexType,LabelType> >::type FunctionListType;
-typedef opengm::GraphicalModel<ValueType,opengm::Adder, FunctionListType, opengm::DiscreteSpace<IndexType,LabelType> > GM;
-
-//*************************************
-
-
-int main() {
-
-   opengm::learning::GeneralizedHammingLoss::Parameter param;
-   param.labelLossMultiplier_.push_back(2.0);
-   param.labelLossMultiplier_.push_back(1.0);
-   param.labelLossMultiplier_.push_back(0.5);
-
-   param.nodeLossMultiplier_.push_back(5.0);
-   param.nodeLossMultiplier_.push_back(6.0);
-   param.nodeLossMultiplier_.push_back(7.0);
-   param.nodeLossMultiplier_.push_back(8.0);
-
-   // create loss
-   opengm::learning::GeneralizedHammingLoss loss(param);
-
-   // evaluate for a test point
-   std::vector<size_t> labels;
-   labels.push_back(0);
-   labels.push_back(1);
-   labels.push_back(2);
-   labels.push_back(2);
-
-   std::vector<size_t> ground_truth;
-   ground_truth.push_back(1);
-   ground_truth.push_back(1);
-   ground_truth.push_back(1);
-   ground_truth.push_back(1);
-
-
-   // add loss to a model and evaluate for a given labeling
-   GM gm;
-   size_t numberOfLabels = 3;
-   gm.addVariable(numberOfLabels);
-   gm.addVariable(numberOfLabels);
-   gm.addVariable(numberOfLabels);
-   gm.addVariable(numberOfLabels);
-   OPENGM_ASSERT_OP(loss.loss(gm, labels.begin(), labels.end(), ground_truth.begin(), ground_truth.end()), ==, 17.5);
-
-   // add a unary to node 2 (if indexed from 1)
-   opengm::ExplicitFunction<GM::ValueType,GM::IndexType,GM::LabelType> f(&numberOfLabels, &(numberOfLabels)+1, 2.0);
-   size_t variableIndex = 1;
-   gm.addFactor(gm.addFunction(f), &variableIndex, &variableIndex+1);
-   OPENGM_ASSERT_OP(gm.evaluate(labels.begin()), ==, 2.0);
-
-   // loss augmented model:
-   loss.addLoss(gm, ground_truth.begin());
-   OPENGM_ASSERT_OP(gm.evaluate(labels.begin()), ==, -15.5);
-}
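
The asserted value 17.5 is the sum, over each mislabelled node, of that node's multiplier times the label multiplier of the predicted label, and the loss-augmented evaluation is the plain energy minus that loss; checked in plain Python:

    node_mult  = [5.0, 6.0, 7.0, 8.0]
    label_mult = [2.0, 1.0, 0.5]
    labels       = [0, 1, 2, 2]
    ground_truth = [1, 1, 1, 1]

    loss = sum(node_mult[v] * label_mult[labels[v]]
               for v in range(4) if labels[v] != ground_truth[v])
    print(loss)        # 5*2.0 + 7*0.5 + 8*0.5 = 17.5
    print(2.0 - loss)  # -15.5, matching the loss-augmented assert above
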
diff --git a/src/unittest/learning/test_gridsearch_learner.cxx b/src/unittest/learning/test_gridsearch_learner.cxx
deleted file mode 100644
index 3684cc2..0000000
--- a/src/unittest/learning/test_gridsearch_learner.cxx
+++ /dev/null
@@ -1,90 +0,0 @@
-#include <vector>
-
-#include <opengm/functions/explicit_function.hxx>
-#include <opengm/unittests/test.hxx>
-#include <opengm/graphicalmodel/graphicalmodel.hxx>
-#include <opengm/operations/adder.hxx>
-#include <opengm/operations/minimizer.hxx>
-#include <opengm/inference/icm.hxx>
-#include <opengm/utilities/metaprogramming.hxx>
-
-#include <opengm/functions/learnable/lpotts.hxx>
-#include <opengm/functions/learnable/lweightedsum_of_functions.hxx>
-#include <opengm/learning/gridsearch-learning.hxx>
-#include <opengm/learning/loss/hammingloss.hxx>
-//#include <opengm/learning/dataset/testdataset.hxx>
-//#include <opengm/learning/dataset/testdataset2.hxx>
-#include <opengm/learning/dataset/testdatasets.hxx>
-
-
-//*************************************
-typedef double ValueType;
-typedef size_t IndexType;
-typedef size_t LabelType; 
-typedef opengm::meta::TypeListGenerator<opengm::ExplicitFunction<ValueType,IndexType,LabelType>, opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType>, opengm::functions::learnable::LWeightedSumOfFunctions<ValueType,IndexType,LabelType> >::type FunctionListType;
-typedef opengm::GraphicalModel<ValueType,opengm::Adder, FunctionListType, opengm::DiscreteSpace<IndexType,LabelType> > GM; 
-//typedef opengm::datasets::TestDataset<GM>  DS;
-//typedef opengm::datasets::TestDataset2<GM> DS2;
-typedef opengm::learning::HammingLoss     LOSS;
-typedef opengm::ICM<GM,opengm::Minimizer> INF;
-typedef opengm::datasets::TestDataset1<GM,LOSS> DS1;
-typedef opengm::datasets::TestDataset2<GM,LOSS> DS2;
-
-//*************************************
-
-
-int main() {
-   std::cout << " Includes are fine :-) " << std::endl; 
-   /* 
-   {
-      DS dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      
-      
-      opengm::learning::GridSearchLearner<DS,LOSS>::Parameter para;
-      para.parameterUpperbound_.resize(1,1);
-      para.parameterLowerbound_.resize(1,0);
-      para.testingPoints_.resize(1,10);
-      opengm::learning::GridSearchLearner<DS,LOSS> learner(dataset,para);
-      
-      
-      INF::Parameter infPara;
-      learner.learn<INF>(infPara);
-      
-   } 
-   */
-   {
-      DS1 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      
-      
-      opengm::learning::GridSearchLearner<DS1>::Parameter para;
-      para.parameterUpperbound_.resize(1,1);
-      para.parameterLowerbound_.resize(1,0);
-      para.testingPoints_.resize(1,10);
-      opengm::learning::GridSearchLearner<DS1> learner(dataset,para);
-      
-      
-      INF::Parameter infPara;
-      learner.learn<INF>(infPara);
-      
-   }
-
-   {
-      DS2 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      
-      
-      opengm::learning::GridSearchLearner<DS2>::Parameter para;
-      para.parameterUpperbound_.resize(3,1);
-      para.parameterLowerbound_.resize(3,0);
-      para.testingPoints_.resize(3,5);
-      opengm::learning::GridSearchLearner<DS2> learner(dataset,para);
-      
-      
-      INF::Parameter infPara;
-      learner.learn<INF>(infPara);
-   }
-
-
-}
diff --git a/src/unittest/learning/test_learning.cxx b/src/unittest/learning/test_learning.cxx
deleted file mode 100644
index 289dfdc..0000000
--- a/src/unittest/learning/test_learning.cxx
+++ /dev/null
@@ -1,233 +0,0 @@
-#include <vector>
-
-#include <opengm/functions/explicit_function.hxx>
-#include <opengm/unittests/test.hxx>
-#include <opengm/graphicalmodel/graphicalmodel.hxx>
-#include <opengm/operations/adder.hxx>
-#include <opengm/operations/minimizer.hxx>
-#include <opengm/inference/external/trws.hxx>
-#include <opengm/utilities/metaprogramming.hxx>
-
-#ifdef WITH_GUROBI
-#include <opengm/inference/lpgurobi.hxx>
-#else
-#include <opengm/inference/lpcplex.hxx>
-#endif
-
-
-#include <opengm/functions/learnable/lpotts.hxx>
-#include <opengm/functions/learnable/lweightedsum_of_functions.hxx>
-#include <opengm/learning/struct-max-margin.hxx>
-#include <opengm/learning/loss/hammingloss.hxx>
-//#include <opengm/learning/dataset/testdataset.hxx>
-//#include <opengm/learning/dataset/testdataset2.hxx>
-#include <opengm/learning/dataset/testdatasets.hxx>
-#include <opengm/learning/dataset/editabledataset.hxx>
-
-
-//*************************************
-typedef double ValueType;
-typedef size_t IndexType;
-typedef size_t LabelType; 
-typedef opengm::meta::TypeListGenerator<opengm::ExplicitFunction<ValueType,IndexType,LabelType>, opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType>, opengm::functions::learnable::LWeightedSumOfFunctions<ValueType,IndexType,LabelType> >::type FunctionListType;
-typedef opengm::GraphicalModel<ValueType,opengm::Adder, FunctionListType, opengm::DiscreteSpace<IndexType,LabelType> > GM; 
-typedef opengm::learning::HammingLoss     LOSS;
-
-#ifdef WITH_GUROBI
-typedef opengm::LPGurobi<GM,opengm::Minimizer> INF;
-#else
-typedef opengm::LPCplex<GM,opengm::Minimizer> INF;
-#endif
-typedef opengm::datasets::EditableTestDataset<GM,LOSS> EDS;
-typedef opengm::datasets::TestDataset1<GM,LOSS> DS1;
-typedef opengm::datasets::TestDataset2<GM,LOSS> DS2;
-typedef opengm::datasets::TestDatasetSimple<GM,LOSS> DSS;
-
-//*************************************
-
-
-int main() {
-   std::cout << " Includes are fine :-) " << std::endl; 
-
-   {
-	  DSS dataset(5);
-	  std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-	  
-	  
-	  opengm::learning::StructMaxMargin<DSS>::Parameter para;
-	  opengm::learning::StructMaxMargin<DSS> learner(dataset,para);
-	  
-	  
-	  INF::Parameter infPara;
-	  infPara.integerConstraint_ = true;
-	  learner.learn<INF>(infPara); 
-		  const DSS::Weights& weights = learner.getWeights();
-		  std::cout <<"Weights: ";
-		  for (size_t i=0; i<weights.numberOfWeights(); ++i)
-			 std::cout << weights[i] <<" ";
-		  std::cout <<std::endl;
-   }
-
-   {
-	  DS1 dataset(4);
-	  std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-	  
-	  
-      opengm::learning::StructMaxMargin<DS1>::Parameter para;
-      opengm::learning::StructMaxMargin<DS1> learner(dataset,para);
-	  
-	  
-	  INF::Parameter infPara;
-	  infPara.integerConstraint_ = true;
-	  learner.learn<INF>(infPara);
-          const DS1::Weights& weights = learner.getWeights();
-          std::cout <<"Weights: ";
-          for (size_t i=0; i<weights.numberOfWeights(); ++i)
-             std::cout << weights[i] <<" ";
-          std::cout <<std::endl;
-	  
-   }
-
-   {
-	  DS2 dataset(4);
-	  std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-	  
-	  
-	  opengm::learning::StructMaxMargin<DS2>::Parameter para;
-	  para.optimizerParameter_.lambda = 1000.0;
-	  opengm::learning::StructMaxMargin<DS2> learner(dataset,para);
-	  
-	  
-	  INF::Parameter infPara;
-	  infPara.integerConstraint_ = true;
-	  learner.learn<INF>(infPara);
-		  const DS2::Weights& weights = learner.getWeights();
-		  std::cout <<"Weights: ";
-		  for (size_t i=0; i<weights.numberOfWeights(); ++i)
-			 std::cout << weights[i] <<" ";
-		  std::cout <<std::endl;
-   }
-
-/* Does this make sense?!?
-   {
-        // create editable dataset
-        EDS learningdataset;
-
-        INF::Parameter infPara;
-        infPara.integerConstraint_ = true;
-
-        std::vector< std::vector< LabelType > >GTSolutionVector;
-
-        std::cout << "inference with fixed, arbitrary weights to generate solution" << std::endl;
-
-        EDS::Weights learningWeightVector = learningdataset.getWeights();
-        EDS::Weights randomWeights(learningdataset.getNumberOfWeights());
-
-
-        // opengm::learning::StructMaxMargin<EDS>::Parameter para0;
-        // para0.optimizerParameter_.lambda = 1;
-        // opengm::learning::StructMaxMargin<EDS> learner0(learningdataset,para0);
-
-        // // // learn
-        // learner0.learn<INF>(infPara);
-
-        // std::srand(std::time(0));
-        for (int i = 0; i < learningWeightVector.numberOfWeights(); ++i)
-        {
-            randomWeights[i] = 1.0;
-
-            std::cout << randomWeights[i] << " --->  "  << learningWeightVector[i] << std::endl;
-            learningWeightVector.setWeight(i, randomWeights[i]);//double(std::rand()) / RAND_MAX * 100);
-        }
-
-        for (size_t modelIndex = 0; modelIndex < learningdataset.getNumberOfModels(); modelIndex++)
-        {
-
-            std::cout << "starting inference on GM " << modelIndex << std::endl;
-
-            // INF inference(learningdataset.getModel(modelIndex), infPara);
-            // inference.infer();
-            // std::vector< LabelType > sol1;
-            
-            // OPENGM_TEST(inference.arg(sol1) == opengm::NORMAL);
-
-            INF solver(learningdataset.getModel(modelIndex),infPara);
-            solver.infer();
-            std::vector< LabelType > sol1;
-            OPENGM_TEST(solver.arg(sol1) == opengm::NORMAL);
-
-
-            std::cout << "add solution "<< modelIndex <<" to new dataset" << std::endl;
-            learningdataset.setGT(modelIndex,sol1);
-
-            for (size_t j = 0; j < sol1.size(); j++)
-            {
-              std::cout << sol1[j];
-            }
-            std::cout << std::endl;
-            GTSolutionVector.push_back(sol1);
-        }
-
-
-        std::cout << "learn weights (without regularization)" << std::endl;
-
-        std::cout << "weight vector size " << learningdataset.getNumberOfWeights() << std::endl;
-        // Parameter
-        opengm::learning::StructMaxMargin<EDS>::Parameter para;
-        para.optimizerParameter_.lambda = 0.000000001;
-        opengm::learning::StructMaxMargin<EDS> learner(learningdataset,para);
-
-        // learn
-        learner.learn<INF>(infPara);
-
-        // get the result
-        const EDS::Weights &learnedParameters = learner.getWeights();
-        std::cout << learnedParameters.numberOfWeights() << std::endl;
-        std::cout << "set learnedParameters as new Weights: ";
-        for (size_t i = 0; i < learnedParameters.numberOfWeights(); ++i)
-        {
-            std::cout << learnedParameters[i] << " ";
-            learningWeightVector.setWeight(i, learnedParameters[i]);
-        }
-        std::cout << std::endl;
-
-        std::cout << "new weights: ";
-        for (int i = 0; i < learningWeightVector.numberOfWeights(); i++)
-        {
-            std::cout << learningWeightVector[i] << ", ";
-        }
-        std::cout << std::endl;
-
-
-        std::cout << "inference with new weights" << std::endl;
-        for (size_t modelIndex = 0; modelIndex < learningdataset.getNumberOfModels(); modelIndex++)
-        {
-            std::cout << "starting inference on GM " << modelIndex << " with learned weights" << std::endl;
-            INF solver(learningdataset.getModel(modelIndex),infPara);
-            solver.infer();
-            std::vector< LabelType > sol2;
-            OPENGM_TEST(solver.arg(sol2) == opengm::NORMAL);
-            //for (size_t j = 0; j < sol2.size(); j++)
-            //{
-            //std::cout << "sol2["<<j<<"]:" << sol2[j] << "   GTSolutionVector["<<modelIndex<<"]["<<j<<"]:" << GTSolutionVector[modelIndex][j] << std::endl; 
-            //  //!may not be true! OPENGM_TEST(sol2[j] == GTSolutionVector[modelIndex][j]);
-            //}
-            OPENGM_TEST( learningdataset.getModel(modelIndex).evaluate(sol2) ==  learningdataset.getModel(modelIndex).evaluate(GTSolutionVector[modelIndex]) );
-            // for (size_t j = 0; j < sol2.size(); j++)
-            // {
-            //    std::cout << sol2[j]; 
-            //    // OPENGM_TEST(sol2[j] == GTSolutionVector[modelIndex][j]);
-            // }
-            // std::cout << std::endl<< std::endl;
-            // for (size_t j = 0; j < sol2.size(); j++)
-            // {
-            //    std::cout <<  GTSolutionVector[modelIndex][j]; 
-            //    // OPENGM_TEST(sol2[j] == GTSolutionVector[modelIndex][j]);
-            // }
-            std::cout << "all " << sol2.size() << " solutions are correct" << std::endl;
-        }
-
-    }
-*/
-}
-
diff --git a/src/unittest/learning/test_maximum_likelihood_learner.cxx b/src/unittest/learning/test_maximum_likelihood_learner.cxx
deleted file mode 100644
index ff976a4..0000000
--- a/src/unittest/learning/test_maximum_likelihood_learner.cxx
+++ /dev/null
@@ -1,126 +0,0 @@
-#include <vector>
-
-#include <opengm/functions/explicit_function.hxx>
-#include <opengm/unittests/test.hxx>
-#include <opengm/graphicalmodel/graphicalmodel.hxx>
-#include <opengm/operations/adder.hxx>
-#include <opengm/operations/minimizer.hxx>
-#include <opengm/inference/icm.hxx>
-#include <opengm/utilities/metaprogramming.hxx>
-
-#include <opengm/functions/learnable/lpotts.hxx>
-//#include <opengm/learning/maximum-likelihood-learning.hxx>
-#include <opengm/learning/maximum_likelihood_learning.hxx>
-#include <opengm/learning/loss/hammingloss.hxx>
-#include <opengm/learning/dataset/testdatasets.hxx>
-
-
-//*************************************
-
-typedef double ValueType;
-typedef size_t IndexType;
-typedef size_t LabelType; 
-typedef opengm::meta::TypeListGenerator<
-    opengm::ExplicitFunction<ValueType,IndexType,LabelType>,
-    opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType>,
-    opengm::functions::learnable::LWeightedSumOfFunctions<ValueType,IndexType,LabelType>
->::type FunctionListType;
-
-typedef opengm::GraphicalModel<
-    ValueType,opengm::Adder,
-    FunctionListType,
-    opengm::DiscreteSpace<IndexType,LabelType>
-> GM;
-
-typedef opengm::learning::HammingLoss     LOSS;
-typedef opengm::datasets::TestDataset0<GM,LOSS> DS0;
-typedef opengm::datasets::TestDataset1<GM,LOSS> DS1;
-typedef opengm::datasets::TestDataset2<GM,LOSS> DS2;
-typedef opengm::datasets::TestDatasetSimple<GM,LOSS> DSSimple;
-typedef opengm::ICM<GM,opengm::Minimizer> INF;
-
-typedef opengm::BeliefPropagationUpdateRules<GM, opengm::Integrator> UpdateRules;
-typedef opengm::MessagePassing<GM, opengm::Integrator, UpdateRules, opengm::MaxDistance> BeliefPropagation;
-//*************************************
-
-
-int main() {
-   std::cout << " Includes are fine :-) " << std::endl; 
-   /*
-   {
-      DS0 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::learning::MaximumLikelihoodLearner<DS0,LOSS>::Weight weight;
-      opengm::learning::MaximumLikelihoodLearner<DS0,LOSS> learner(dataset,weight);
-      INF::Parameter infWeight;
-      learner.learn<INF>(infWeight);
-
-   }
-*/
-
-
-   {
-      DS1 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::learning::MaximumLikelihoodLearner<DS1>::Parameter parameter;
-      parameter.maximumNumberOfIterations_ = 15;
-      parameter.gradientStepSize_ = 0.1;
-      parameter.weightStoppingCriteria_ = 0.001;
-      parameter.gradientStoppingCriteria_ = 0.00000000001;
-      parameter.infoFlag_ = true;
-      parameter.infoEveryStep_ = true;
-      parameter.weightRegularizer_ = 1.0;
-      parameter.beliefPropagationMaximumNumberOfIterations_ = 5;
-      parameter.beliefPropagationConvergenceBound_ = 0.0001;
-      parameter.beliefPropagationDamping_ = 0.5;
-      parameter.beliefPropagationTemperature_ = 0.3;
-      parameter.beliefPropagationIsAcyclic_ = opengm::Tribool(opengm::Tribool::Maybe);
-      opengm::learning::MaximumLikelihoodLearner<DS1> learner(dataset,parameter);
-
-      learner.learn();
-      
-   }
-
-   {
-      DS2 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::learning::MaximumLikelihoodLearner<DS2>::Parameter parameter;
-      parameter.maximumNumberOfIterations_ = 15;
-      parameter.gradientStepSize_ = 0.1;
-      parameter.weightStoppingCriteria_ = 0.001;
-      parameter.gradientStoppingCriteria_ = 0.00000000001;
-      parameter.infoFlag_ = true;
-      parameter.infoEveryStep_ = true;
-      parameter.weightRegularizer_ = 1.0;
-      parameter.beliefPropagationMaximumNumberOfIterations_ = 5;
-      parameter.beliefPropagationConvergenceBound_ = 0.0001;
-      parameter.beliefPropagationDamping_ = 0.5;
-      parameter.beliefPropagationTemperature_ = 0.3;
-      parameter.beliefPropagationIsAcyclic_ = opengm::Tribool(opengm::Tribool::Maybe);
-      opengm::learning::MaximumLikelihoodLearner<DS2> learner(dataset,parameter);
-
-      learner.learn();
-      
-   }
-/*
-
-   {
-      DS2 dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::learning::MaximumLikelihoodLearner<DS2,LOSS>::Weight weight;
-      opengm::learning::MaximumLikelihoodLearner<DS2,LOSS> learner(dataset,weight);
-      INF::Parameter infWeight;
-      learner.learn<INF>(infWeight);
-   }
-*/
-/*
-   {
-      DSSimple dataset;
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::learning::MaximumLikelihoodLearner<DSSimple,LOSS>::Weight weight;
-      opengm::learning::MaximumLikelihoodLearner<DSSimple,LOSS> learner(dataset,weight);
-      INF::Parameter infWeight;
-      learner.learn<INF>(infWeight);
-   }
-*/
-}
diff --git a/src/unittest/learning/test_subgradient_ssvm.cxx b/src/unittest/learning/test_subgradient_ssvm.cxx
deleted file mode 100644
index fd009a0..0000000
--- a/src/unittest/learning/test_subgradient_ssvm.cxx
+++ /dev/null
@@ -1,238 +0,0 @@
-#include <vector>
-
-#include <opengm/functions/explicit_function.hxx>
-#include <opengm/unittests/test.hxx>
-#include <opengm/graphicalmodel/graphicalmodel.hxx>
-#include <opengm/operations/adder.hxx>
-#include <opengm/operations/minimizer.hxx>
-#include <opengm/utilities/metaprogramming.hxx>
-
-#include <opengm/inference/lpcplex.hxx>
-#include <opengm/inference/multicut.hxx>
-#include <opengm/inference/external/trws.hxx>
-
-
-
-#include <opengm/functions/learnable/lpotts.hxx>
-#include <opengm/functions/learnable/lweightedsum_of_functions.hxx>
-#include <opengm/learning/subgradient_ssvm.hxx>
-#include <opengm/learning/loss/hammingloss.hxx>
-//#include <opengm/learning/dataset/testdataset.hxx>
-//#include <opengm/learning/dataset/testdataset2.hxx>
-#include <opengm/learning/dataset/testdatasets.hxx>
-#include <opengm/learning/dataset/editabledataset.hxx>
-
-
-//*************************************
-typedef double ValueType;
-typedef size_t IndexType;
-typedef size_t LabelType; 
-typedef opengm::meta::TypeListGenerator<opengm::ExplicitFunction<ValueType,IndexType,LabelType>, opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType>, opengm::functions::learnable::LWeightedSumOfFunctions<ValueType,IndexType,LabelType> >::type FunctionListType;
-typedef opengm::GraphicalModel<ValueType,opengm::Adder, FunctionListType, opengm::DiscreteSpace<IndexType,LabelType> > GM; 
-typedef opengm::learning::HammingLoss     LOSS;
-
-typedef opengm::Multicut<GM,opengm::Minimizer> Multicut;
-typedef opengm::LPCplex<GM,opengm::Minimizer> INFCPLEX;
-typedef opengm::external::TRWS<GM> INFTRWS;
-
-typedef opengm::datasets::EditableTestDataset<GM,LOSS> EDS;
-typedef opengm::datasets::TestDataset1<GM,LOSS> DS1;
-typedef opengm::datasets::TestDataset2<GM,LOSS> DS2;
-typedef opengm::datasets::TestDatasetSimple<GM,LOSS> DSS;
-
-//*************************************
-
-
-int main() {
-   {
-      DSS dataset(5);
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      
-      
-      opengm::learning::SubgradientSSVM<DSS>::Parameter para;
-      para.maxIterations_ = 50;
-      para.C_ = 100.0;
-      para.learningRate_ = 0.1;
-      opengm::learning::SubgradientSSVM<DSS> learner(dataset,para);
-      
-      
-      INFCPLEX::Parameter infPara;
-      infPara.integerConstraint_ = true;
-      learner.learn<INFCPLEX>(infPara);
-      const DSS::Weights& weights = learner.getWeights();
-      std::cout <<"Weights: ";
-      for (size_t i=0; i<weights.numberOfWeights(); ++i)
-         std::cout << weights[i] <<" ";
-      std::cout <<std::endl;
-   }
-
-   {
-      DS1 dataset(4);
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      
-      
-      opengm::learning::SubgradientSSVM<DS1>::Parameter para;
-      para.maxIterations_ = 10;
-      para.C_ = 10.0;
-      para.learningRate_ = 0.01;
-
-      opengm::learning::SubgradientSSVM<DS1> learner(dataset,para);
-      
-      
-      INFTRWS::Parameter infPara;
-      //infPara.integerConstraint_ = true;
-      learner.learn<INFTRWS>(infPara);
-      const DS1::Weights& weights = learner.getWeights();
-      std::cout <<"Weights: ";
-      for (size_t i=0; i<weights.numberOfWeights(); ++i)
-         std::cout << weights[i] <<" ";
-      std::cout <<std::endl;
-      
-   }
-
-   {
-      DS2 dataset(4);
-      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      
-      
-      opengm::learning::SubgradientSSVM<DS2>::Parameter para;
-      para.maxIterations_ = 10;
-      para.C_ = 10.0;
-      para.learningRate_ = 0.01;
-      opengm::learning::SubgradientSSVM<DS2> learner(dataset,para);
-      
-      
-      INFTRWS::Parameter infPara;
-      //infPara.integerConstraint_ = true;
-      learner.learn<INFTRWS>(infPara);
-      const DS2::Weights& weights = learner.getWeights();
-      std::cout <<"Weights: ";
-      for (size_t i=0; i<weights.numberOfWeights(); ++i)
-         std::cout << weights[i] <<" ";
-      std::cout <<std::endl;
-   }
-
-/* ?!?!?
-   {
-        // create editable dataset
-        EDS learningdataset;
-
-        INFTRWS::Parameter infPara;
-
-
-        std::vector< std::vector< LabelType > >GTSolutionVector;
-
-        std::cout << "inference with fixed, arbitrary weights to generate solution" << std::endl;
-
-        EDS::Weights learningWeightVector = learningdataset.getWeights();
-        EDS::Weights randomWeights(learningdataset.getNumberOfWeights());
-
-
-        // opengm::learning::SubgradientSSVM<EDS>::Parameter para0;
-        // para0.optimizerParameter_.lambda = 1;
-        // opengm::learning::SubgradientSSVM<EDS> learner0(learningdataset,para0);
-
-        // // // learn
-        // learner0.learn<INFTRWS>(infPara);
-
-        // std::srand(std::time(0));
-        for (int i = 0; i < learningWeightVector.numberOfWeights(); ++i)
-        {
-            randomWeights[i] = 1.0;
-
-            std::cout << randomWeights[i] << " --->  "  << learningWeightVector[i] << std::endl;
-            learningWeightVector.setWeight(i, randomWeights[i]);//double(std::rand()) / RAND_MAX * 100);
-        }
-
-        for (size_t modelIndex = 0; modelIndex < learningdataset.getNumberOfModels(); modelIndex++)
-        {
-
-            std::cout << "starting inference on GM " << modelIndex << std::endl;
-
-            // INFTRWS inference(learningdataset.getModel(modelIndex), infPara);
-            // inference.infer();
-            // std::vector< LabelType > sol1;
-            
-            // OPENGM_TEST(inference.arg(sol1) == opengm::NORMAL);
-
-            INFTRWS solver(learningdataset.getModel(modelIndex),infPara);
-            solver.infer();
-            std::vector< LabelType > sol1;
-            OPENGM_TEST(solver.arg(sol1) == opengm::NORMAL);
-
-
-            std::cout << "add solution "<< modelIndex <<" to new dataset" << std::endl;
-            learningdataset.setGT(modelIndex,sol1);
-
-            for (size_t j = 0; j < sol1.size(); j++)
-            {
-              std::cout << sol1[j];
-            }
-            std::cout << std::endl;
-            GTSolutionVector.push_back(sol1);
-        }
-
-
-        std::cout << "learn weights (without regularization)" << std::endl;
-
-        std::cout << "weight vector size " << learningdataset.getNumberOfWeights() << std::endl;
-        // Parameter
-        opengm::learning::SubgradientSSVM<EDS>::Parameter para;
-        para.maxIterations_ = 500;
-        para.C_ = 10000.0;
-        para.learningRate_ = 0.1;
-        opengm::learning::SubgradientSSVM<EDS> learner(learningdataset,para);
-
-        // learn
-        learner.learn<INFTRWS>(infPara);
-
-        // get the result
-        const EDS::Weights &learnedParameters = learner.getWeights();
-        std::cout << learnedParameters.numberOfWeights() << std::endl;
-        std::cout << "set learnedParameters as new Weights: ";
-        for (size_t i = 0; i < learnedParameters.numberOfWeights(); ++i)
-        {
-            std::cout << learnedParameters[i] << " ";
-            learningWeightVector.setWeight(i, learnedParameters[i]);
-        }
-        std::cout << std::endl;
-
-        std::cout << "new weights: ";
-        for (int i = 0; i < learningWeightVector.numberOfWeights(); i++)
-        {
-            std::cout << learningWeightVector[i] << ", ";
-        }
-        std::cout << std::endl;
-
-
-        std::cout << "inference with new weights" << std::endl;
-        for (size_t modelIndex = 0; modelIndex < learningdataset.getNumberOfModels(); modelIndex++)
-        {
-            std::cout << "starting inference on GM " << modelIndex << " with learned weights" << std::endl;
-            INFTRWS solver(learningdataset.getModel(modelIndex),infPara);
-            solver.infer();
-            std::vector< LabelType > sol2;
-            OPENGM_TEST(solver.arg(sol2) == opengm::NORMAL);
-            for (size_t j = 0; j < sol2.size(); j++)
-            {
-               std::cout << "sol2["<<j<<"]:" << sol2[j] << "   GTSolutionVector["<<modelIndex<<"]["<<j<<"]:" << GTSolutionVector[modelIndex][j] << std::endl; 
-               OPENGM_TEST(sol2[j] == GTSolutionVector[modelIndex][j]);
-            }
-            // for (size_t j = 0; j < sol2.size(); j++)
-            // {
-            //    std::cout << sol2[j]; 
-            //    // OPENGM_TEST(sol2[j] == GTSolutionVector[modelIndex][j]);
-            // }
-            // std::cout << std::endl<< std::endl;
-            // for (size_t j = 0; j < sol2.size(); j++)
-            // {
-            //    std::cout <<  GTSolutionVector[modelIndex][j]; 
-            //    // OPENGM_TEST(sol2[j] == GTSolutionVector[modelIndex][j]);
-            // }
-            std::cout << "all " << sol2.size() << " solutions are correct" << std::endl;
-        }
-
-    }
-*/
-}
-
diff --git a/src/unittest/test_gm_learning_functions.cxx b/src/unittest/test_gm_learning_functions.cxx
index b797175..4c99ca6 100644
--- a/src/unittest/test_gm_learning_functions.cxx
+++ b/src/unittest/test_gm_learning_functions.cxx
@@ -1,6 +1,7 @@
 #include <vector>
 
 #include <opengm/functions/explicit_function.hxx>
+#include <opengm/functions/l_potts.hxx>
 #include <opengm/functions/potts.hxx>
 #include <opengm/functions/pottsn.hxx>
 #include <opengm/unittests/test.hxx>
@@ -8,7 +9,7 @@
 #include <opengm/operations/multiplier.hxx>
 #include <opengm/inference/bruteforce.hxx>
 #include <opengm/utilities/metaprogramming.hxx>
-#include <opengm/functions/learnable/lpotts.hxx>
+
 
 struct TestFunctor{
 
@@ -40,8 +41,8 @@ struct GraphicalModelTest {
       ValueType, //value type (should be float double or long double)
       opengm::Multiplier, //operator (something like Adder or Multiplier)
       typename opengm::meta::TypeListGenerator<
-         opengm::ExplicitFunction<ValueType,I,L>,
-         opengm::PottsNFunction<ValueType,I,L>
+         opengm::ExplicitFunction<ValueType,I,L>, 
+         opengm::PottsNFunction<ValueType,I,L> 
       >::type, //implicit function functor
       opengm::DiscreteSpace<I, L>
    >  GraphicalModelType;
@@ -54,14 +55,14 @@ struct GraphicalModelTest {
       typedef typename opengm::meta::TypeListGenerator
          <
          opengm::ExplicitFunction<T,I,L>,
-         opengm::functions::learnable::LPotts<T,I,L>
+         opengm::LPottsFunction<T,I,L>
          >::type FunctionTypeList;
       typedef opengm::GraphicalModel<T, opengm::Minimizer, FunctionTypeList, opengm::DiscreteSpace<I, L> > GmType;
       typedef typename GmType::FunctionIdentifier Fid;
 
 
       typedef opengm::ExplicitFunction<T,I,L> EF;
-      typedef opengm::functions::learnable::LPotts<T,I,L> LPF;
+      typedef opengm::LPottsFunction<T,I,L> LPF;
 
 
       // graphical model
@@ -69,12 +70,10 @@ struct GraphicalModelTest {
       GmType gmA(opengm::DiscreteSpace<I, L > (nos, nos + 3));
 
       // parameter
-      const size_t numweights = 1;
-      opengm::learning::Weights<T> weights(numweights);
-      weights.setWeight(0,5.0);
-      std::vector<size_t> weightIds(1, 0);
-      std::vector<T> features(1, 1.0);
-      LPF lPotts(weights,2,weightIds, features);
+      const size_t numparam = 1;
+      opengm::Parameters<T,I> param(numparam);
+      param.setParameter(0,5.0);
+      LPF lPotts(2,2,param,0);
 
 
       I labels00[2]={0,0};
@@ -88,7 +87,7 @@ struct GraphicalModelTest {
       OPENGM_ASSERT_OP(lPotts(labels10),<,5.01);
 
 
-      weights.setWeight(0,3.0);
+      param.setParameter(0,3.0);
 
       OPENGM_ASSERT_OP(lPotts(labels01),>,2.99);
       OPENGM_ASSERT_OP(lPotts(labels01),<,3.01);
diff --git a/src/unittest/test_learnable_functions.cxx b/src/unittest/test_learnable_functions.cxx
deleted file mode 100644
index 012f752..0000000
--- a/src/unittest/test_learnable_functions.cxx
+++ /dev/null
@@ -1,79 +0,0 @@
-#include <vector>
-
-#include "opengm/functions/learnable/lpotts.hxx" 
-#include <opengm/unittests/test.hxx>
-
-template<class T>
-struct LearnableFunctionsTest {
-  typedef size_t LabelType;
-  typedef size_t IndexType;
-  typedef T      ValueType;
-
-  void testLPotts(){
-    std::cout << " * LearnablePotts ..." << std::endl; 
-
-    std::cout  << "    - test basics ..." <<std::flush;
-    // parameter
-    const size_t numparam = 1;
-    opengm::learning::Weights<ValueType> param(numparam);
-    param.setWeight(0,5.0);
-    
-    LabelType numL = 3;
-    std::vector<size_t> pIds(1,0);
-    std::vector<ValueType> feat(1,1);
-    // function
-    opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType> f(param,numL,pIds,feat);
-
-    LabelType l[] ={0,0};
-    for(l[0]=0;l[0]<numL;++l[0]){
-      for(l[1]=0;l[1]<numL;++l[1]){
-	if(l[0]==l[1]){
-	  OPENGM_TEST_EQUAL_TOLERANCE(f(l),0, 0.0001);
-	}else{
-	  OPENGM_TEST_EQUAL_TOLERANCE(f(l),5.0, 0.0001);
-	}
-      }
-    }
-    std::cout << " OK" << std::endl; 
-    std::cout  << "    - test serializations ..." <<std::flush;
-    {
-       typedef  opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType> FUNCTION;
-       const size_t sizeIndices=opengm::FunctionSerialization<FUNCTION>::indexSequenceSize(f);
-       const size_t sizeValues=opengm::FunctionSerialization<FUNCTION>::valueSequenceSize(f);
-       std::vector<long long unsigned> indices(sizeIndices);
-       std::vector<T> values(sizeValues);
-      
-       opengm::FunctionSerialization<FUNCTION>::serialize(f,indices.begin(),values.begin());
-       FUNCTION f2;
-       opengm::FunctionSerialization<FUNCTION>::deserialize(indices.begin(),values.begin(),f2);
-       f2.setWeights(param);
-
-       OPENGM_TEST(f.dimension()==f2.dimension());
-       OPENGM_TEST(f.size() == f2.size());
-       std::vector<size_t> shape(f.dimension());
-       for(size_t i=0;i<f.dimension();++i) {
-          shape[i]=f.shape(i);
-          OPENGM_TEST(f.shape(i)==f2.shape(i));
-       }
-       opengm::ShapeWalker<std::vector<size_t>::const_iterator > walker(shape.begin(),f.dimension());
-       for(size_t i=0;i<f.size();++i) {
-          OPENGM_TEST(walker.coordinateTuple().size()==f.dimension());
-          OPENGM_TEST(f(walker.coordinateTuple().begin())==f2(walker.coordinateTuple().begin()) );
-          ++walker;
-       }
-    }
-    std::cout << " OK" << std::endl; 
-  }
-
-};
-
-
-int main() {
-   std::cout << "Learnable Functions test...  " << std::endl;
-   {
-      LearnableFunctionsTest<double>t;
-      t.testLPotts();
-   }
-   std::cout << "done.." << std::endl;
-   return 0;
-}
diff --git a/src/unittest/test_linear_constraint.cxx b/src/unittest/test_linear_constraint.cxx
index 7521458..aa7a09f 100644
--- a/src/unittest/test_linear_constraint.cxx
+++ b/src/unittest/test_linear_constraint.cxx
@@ -36,7 +36,6 @@ int main(int argc, char** argv){
 
    std::cout << "done..." << std::endl;
    return 0;
-   
 }
 
 template<class VALUE_TYPE, class INDEX_TYPE, class LABEL_TYPE>

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git
