[opengm] 330/386: removed py learning

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:38:23 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit d7f408f64e59999d7fdac732b35078bd08d4ea2c
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date:   Fri Mar 25 12:44:25 2016 +0100

    removed py learning
---
 include/opengm/python/opengmpython.hxx             |   5 -
 src/interfaces/python/opengm/CMakeLists.txt        |   2 +-
 .../python/opengm/learning/CMakeLists.txt          | 151 -----
 src/interfaces/python/opengm/learning/__init__.py  | 726 ---------------------
 src/interfaces/python/opengm/learning/helper.hxx   | 330 ----------
 src/interfaces/python/opengm/learning/learning.cxx |  91 ---
 .../python/opengm/learning/pyDataset.cxx           | 104 ---
 .../python/opengm/learning/pyGridSearchLearner.cxx |  64 --
 .../python/opengm/learning/pyLFunctionGen.cxx      | 309 ---------
 src/interfaces/python/opengm/learning/pyLoss.cxx   |  81 ---
 .../opengm/learning/pyMaxLikelihoodLearner.cxx     |  81 ---
 src/interfaces/python/opengm/learning/pyRws.cxx    |  72 --
 .../opengm/learning/pyStructMaxMarginLearner.cxx   |  64 --
 .../python/opengm/learning/pyStructPerceptron.cxx  |  75 ---
 .../python/opengm/learning/pySubgradientSSVM.cxx   |  80 ---
 .../python/opengm/learning/pyWeights.cxx           |  46 --
 16 files changed, 1 insertion(+), 2280 deletions(-)
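
For context, the deleted package exposed a small structured-learning API on
top of the opengm core bindings. A minimal usage sketch, reconstructed from
the deleted __init__.py shown below (the inference class passed to learn()
is illustrative; only solvers exported in helper.hxx would work):

    import opengm
    from opengm import learning

    # dataset holding models, ground truth and the shared weight vector
    dataset = learning.createDataset(numWeights=3)
    weights = dataset.getWeights()
    # dataset.pushBackInstance(gm, gt)   # gm/gt come from the application

    # grid search over the weight space
    learner = learning.gridSearchLearner(
        dataset,
        lowerBounds=[0.0, 0.0, 0.0],
        upperBounds=[1.0, 1.0, 1.0],
        nTestPoints=[5, 5, 5])
    # learner.learn(infCls=opengm.inference.TrwsExternal)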

diff --git a/include/opengm/python/opengmpython.hxx b/include/opengm/python/opengmpython.hxx
index 4f771f2..2737230 100644
--- a/include/opengm/python/opengmpython.hxx
+++ b/include/opengm/python/opengmpython.hxx
@@ -24,11 +24,6 @@
 #include <opengm/python/numpyview.hxx>
 #include <opengm/python/pythonfunction.hxx>
 
-#include <opengm/learning/dataset/editabledataset.hxx>
-#include <opengm/learning/loss/hammingloss.hxx>
-#include <opengm/learning/loss/generalized-hammingloss.hxx>
-#include <opengm/learning/loss/noloss.hxx>
-#include <opengm/learning/loss/flexibleloss.hxx>
 
 #include <algorithm>
 #include <vector>
diff --git a/src/interfaces/python/opengm/CMakeLists.txt b/src/interfaces/python/opengm/CMakeLists.txt
index 4511e23..65a5c4a 100644
--- a/src/interfaces/python/opengm/CMakeLists.txt
+++ b/src/interfaces/python/opengm/CMakeLists.txt
@@ -37,7 +37,7 @@ include_directories(
 
 add_subdirectory(opengmcore)
 add_subdirectory(inference)
-add_subdirectory(learning)
+#add_subdirectory(learning)
 add_subdirectory(hdf5)
 add_subdirectory(benchmark)
 
diff --git a/src/interfaces/python/opengm/learning/CMakeLists.txt b/src/interfaces/python/opengm/learning/CMakeLists.txt
deleted file mode 100644
index a594ea4..0000000
--- a/src/interfaces/python/opengm/learning/CMakeLists.txt
+++ /dev/null
@@ -1,151 +0,0 @@
-#--------------------------------------------------------------
-# Include Directories
-#--------------------------------------------------------------
-find_package(NUMPY)
-include_directories(
-    ${CMAKE_CURRENT_SOURCE_DIR}
-	 ${PYTHON_INCLUDE_DIRS}
-	 ${PYTHON_NUMPY_INCLUDE_DIR}
-    ${Boost_INCLUDE_DIR}
-    ${Boost_PYTHON_INCLUDE_DIR}
-)
-
-
-
-
-
-#--------------------------------------------------------------
-# Add opengmcore library
-#--------------------------------------------------------------
-set(PY_OPENGM_CORE_SOURCES
-            learning.cxx
-            pyLFunctionGen.cxx
-            pyWeights.cxx
-            pyDataset.cxx
-            pyLoss.cxx
-            pyGridSearchLearner.cxx
-            #pyMaxLikelihoodLearner.cxx
-            pyStructMaxMarginLearner.cxx
-            pySubgradientSSVM.cxx
-            pyStructPerceptron.cxx
-            pyRws.cxx
-            )
-
-if(APPLE)
-    add_library(_learning MODULE ${PY_OPENGM_CORE_SOURCES})
-else()
-    add_library(_learning SHARED ${PY_OPENGM_CORE_SOURCES})
-endif(APPLE)
-
-
-#--------------------------------------------------------------
-# Link libraries
-#--------------------------------------------------------------
-if(OPENMP_FOUND)
-    SET_TARGET_PROPERTIES(_learning PROPERTIES COMPILE_FLAGS "${OpenMP_CXX_FLAGS}")
-    SET_TARGET_PROPERTIES(_learning PROPERTIES LINK_FLAGS "${OpenMP_CXX_FLAGS}")
-endif()
-
-if(MSVC AND NOT(MSVC_VERSION LESS 1400))
-    SET_PROPERTY(TARGET _learning APPEND_STRING PROPERTY COMPILE_FLAGS " /bigobj")
-endif()
-if(APPLE)
-    SET_PROPERTY(TARGET _learning APPEND_STRING PROPERTY LINK_FLAGS " -undefined dynamic_lookup")
-endif(APPLE)
-
-
-if(LINK_RT)
-    find_library(RT rt)
-    target_link_libraries(_learning ${Boost_PYTHON_LIBRARIES} rt)
-else()
-    target_link_libraries(_learning ${Boost_PYTHON_LIBRARIES})
-endif(LINK_RT)
-
-set_target_properties(_learning PROPERTIES PREFIX "")
-
-
-IF(WIN32)
-    SET_TARGET_PROPERTIES(_learning PROPERTIES OUTPUT_NAME "learning"  PREFIX "_" SUFFIX  ".pyd")
-ELSEIF(APPLE)
-    SET_TARGET_PROPERTIES(_learning PROPERTIES OUTPUT_NAME "learning" PREFIX "_" SUFFIX ".so")
-ELSE()
-    SET_TARGET_PROPERTIES(_learning PROPERTIES OUTPUT_NAME "learning"   PREFIX "_")
-ENDIF()
-
-target_link_libraries(_learning  ${HDF5_CORE_LIBRARY} ${HDF5_LIBRARIES} )
-
-
-
-if(WITH_LIBDAI)
-  target_link_libraries(_learning ${LIBDAI_LIBRARY})
-  target_link_libraries(_learning ${GMPXX_LIBRARY})
-  target_link_libraries(_learning ${GMP_LIBRARY}) 
-endif()
-
-if(WITH_QPBO)
-   target_link_libraries(_learning external-library-qpbo-shared)
-endif()
-
-if(WITH_MAXFLOW)
-   target_link_libraries(_learning external-library-maxflow-shared)
-endif()
-
-if(WITH_MAXFLOW_IBFS)
-  target_link_libraries(_learning external-library-maxflow-ibfs-shared)
-endif()
-
-if(WITH_TRWS)
-   target_link_libraries(_learning external-library-trws-shared)
-endif()
-
-
-if(WITH_FASTPD)
-   target_link_libraries(_learning external-library-fastpd-shared)
-endif()
-
-if(WITH_AD3)
-   target_link_libraries(_learning external-library-ad3-shared )
-endif()
-
-#SET(LINK_FLAGS "${LINK_FLAGS} -PIC")
-#SET_TARGET_PROPERTIES(_learning PROPERTIES LINK_FLAGS   "-fPIC")
-#add_definitions(-fPIC)
-
-if(WITH_CONICBUNDLE)
-  #target_link_libraries(_learning ${CONICBUNDLE_LIBRARY})
-endif()
-
-if(WITH_MRF)
-   target_link_libraries(_learning external-library-mrf-shared)
-endif()
-
-
-
-
-if(WITH_CPLEX)
-  if(WIN32)
-      target_link_libraries(_learning wsock32.lib ${CPLEX_ILOCPLEX_LIBRARY} ${CPLEX_LIBRARY} ${CPLEX_CONCERT_LIBRARY})
-   else()
-      target_link_libraries(_learning ${CMAKE_THREAD_LIBS_INIT} ${CPLEX_ILOCPLEX_LIBRARY} ${CPLEX_LIBRARY} ${CPLEX_CONCERT_LIBRARY} )
-    endif()
-endif()
-
-
-if(WITH_GUROBI)
-  target_link_libraries(_learning ${CMAKE_THREAD_LIBS_INIT} 
-    ${GUROBI_LIBRARIES}
-    #${GUOBI_CXX_LIBRARY}  
-    ${CMAKE_THREAD_LIBS_INIT}
-  )
-endif()
-
-#--------------------------------------------------------------
-# Copy from src to build
-#--------------------------------------------------------------
-
-if( ${CMAKE_CURRENT_SOURCE_DIR} STREQUAL  ${CMAKE_CURRENT_BINARY_DIR} )
-   message(STATUS "same src and build dir.")
-else()
-   message(STATUS "copy python-learning files  from src to build" )
-   file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/__init__.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR} )
-endif()
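
The CMake file above built the Boost.Python extension as "_learning" (note
the forced "_" prefix and the platform-specific suffix), matching the import
contract at the top of the deleted __init__.py:

    # the pure-Python package simply re-exported the compiled module
    from _learning import *
    from _learning import _lunarySharedFeatFunctionsGen, _lpottsFunctionsGen
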
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
deleted file mode 100644
index f1e80d2..0000000
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ /dev/null
@@ -1,726 +0,0 @@
-from _learning import *
-from _learning import _lunarySharedFeatFunctionsGen,_lpottsFunctionsGen
-import numpy
-import struct
-from opengm import index_type,value_type, label_type, graphicalModel,gridVis
-from opengm import configuration as opengmConfig, LUnaryFunction
-from opengm import to_native_boost_python_enum_converter
-from opengm import Tribool
-#from progressbar import *
-from functools import partial
-
-
-def _extendedGetLoss(self, model_idx, infCls, parameter = None):
-    if parameter is None:
-        import opengm
-        parameter = opengm.InfParam()
-    cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
-    return self._getLoss(cppParam, model_idx)
-
-def _extendedGetTotalLoss(self, infCls, parameter = None):
-    if parameter is None:
-        import opengm
-        parameter = opengm.InfParam()
-    cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
-    return self._getTotalLoss(cppParam)
-
-
-
-
-
-
-DatasetWithFlexibleLoss.lossType = 'flexible'
-
-
-class LossParameter(FlexibleLossParameter):
-    def __init__(self, lossType, labelMult=None, nodeMult=None, factorMult=None):
-        super(LossParameter, self).__init__()
-
-        self.lossType = to_native_boost_python_enum_converter(lossType,self.lossType.__class__)
-
-        if labelMult is not None:
-            assert self.lossType == LossType.hamming
-            self.setLabelLossMultiplier(labelMult)
-        if nodeMult is not None:
-            assert self.lossType != LossType.partition
-            self.setNodeLossMultiplier(nodeMult)
-        if factorMult is not None:
-            assert self.lossType == LossType.partition
-            self.setFactorLossMultiplier(factorMult)
-
-
-
-def extend_learn():
-    
-    def learner_learn_normal(self, infCls, parameter = None):
-        if parameter is None:
-            import opengm
-            parameter = opengm.InfParam()
-        cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
-        try:
-            self._learn(cppParam)
-        except Exception as e:
-            if str(e).find("did not match C++ signature") != -1:
-                raise RuntimeError("infCls : '%s' is not (yet) exported from c++ to python for learning" % str(infCls))
-            raise
-
-
-    def learner_learn_reduced_inf(self, infCls, parameter = None, persistency=True, tentacles=False, connectedComponents=False):
-        if parameter is None:
-            import opengm
-            parameter = opengm.InfParam()
-        cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
-        try:
-            self._learnReducedInf(cppParam, bool(persistency), bool(tentacles), bool(connectedComponents))
-        except Exception as e:
-            if str(e).find("did not match C++ signature") != -1:
-                raise RuntimeError("infCls : '%s' is not (yet) exported from c++ to python for learning with reduced inference" % str(infCls))
-            raise
-
-    def learner_learn_reduced_inf_self_fusion(self, infCls, parameter = None, persistency=True, tentacles=False, connectedComponents=False):
-        if parameter is None:
-            import opengm
-            parameter = opengm.InfParam()
-        cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
-        try:
-            self._learnReducedInfSelfFusion(cppParam, bool(persistency), bool(tentacles), bool(connectedComponents))
-        except Exception as e:
-            if str(e).find("did not match C++ signature") != -1:
-                raise RuntimeError("infCls : '%s' is not (yet) exported from c++ to python for learning with reduced inference self fusion" % str(infCls))
-            raise
-
-    def learner_learn_self_fusion(self, infCls, parameter = None, fuseNth=1, fusionSolver="qpbo",maxSubgraphSize=2,
-                                  redInf=True, connectedComponents=False, fusionTimeLimit=100.9, numStopIt=10):
-        if parameter is None:
-            import opengm
-            parameter = opengm.InfParam()
-        cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
-        try:
-            self._learnSelfFusion(cppParam, int(fuseNth), str(fusionSolver), int(maxSubgraphSize), bool(redInf),
-                                  bool(connectedComponents), float(fusionTimeLimit), int(numStopIt))
-        except Exception as e:
-            if str(e).find("did not match C++ signature") != -1:
-                raise RuntimeError("infCls : '%s' is not (yet) exported from c++ to python for learning with self fusion inference" % str(infCls))
-            raise
-
-    def learner_learn(self, infCls, parameter=None, infMode='normal',**kwargs):
-        assert infMode in ['normal','n','selfFusion','sf','reducedInference','ri','reducedInferenceSelfFusion','risf']
-
-        if infMode in ['normal','n']:
-            self.learnNormal(infCls=infCls, parameter=parameter)
-        elif infMode in ['selfFusion','sf']:
-            self.learnSelfFusion(infCls=infCls, parameter=parameter,**kwargs)
-        elif infMode in ['reducedInference','ri']:
-            self.learnReducedInf(infCls=infCls, parameter=parameter,**kwargs)
-        elif infMode in ['reducedInferenceSelfFusion','risf']:
-            self.learnReducedInfSelfFusion(infCls=infCls, parameter=parameter,**kwargs)
-
-    # all learner classes
-    learnerClss = [GridSearch_FlexibleLoss, StructPerceptron_FlexibleLoss,  
-                  SubgradientSSVM_FlexibleLoss, Rws_FlexibleLoss] 
-    if opengmConfig.withCplex or opengmConfig.withGurobi :
-        learnerClss.append(StructMaxMargin_Bundle_FlexibleLoss)
-
-    for learnerCls in learnerClss:
-        learnerCls.learn = learner_learn
-        learnerCls.learnNormal = learner_learn_normal
-        learnerCls.learnReducedInf = learner_learn_reduced_inf
-        learnerCls.learnSelfFusion = learner_learn_self_fusion
-        learnerCls.learnReducedInfSelfFusion = learner_learn_reduced_inf_self_fusion
-
-extend_learn()
-del extend_learn
-
-
-
-
-
-DatasetWithFlexibleLoss.getLoss = _extendedGetLoss
-DatasetWithFlexibleLoss.getTotalLoss = _extendedGetTotalLoss
-
-
-def createDataset(numWeights,  numInstances=0):
-    w  = Weights(numWeights)
-
-    # if loss not in ['hamming','h','gh','generalized-hamming']:
-    #     raise RuntimeError("loss must be 'hamming' /'h' or 'generalized-hamming'/'gh' ")    
-    # if loss in ['hamming','h']:
-    #     dataset = DatasetWithHammingLoss(int(numInstances))
-    # elif loss in ['generalized-hamming','gh']:
-    #     dataset = DatasetWithGeneralizedHammingLoss(int(numInstances))
-    # else:
-    #     raise RuntimeError("loss must be 'hamming' /'h' or 'generalized-hamming'/'gh' ")   
-    dataset = DatasetWithFlexibleLoss(numInstances)
-    dataset.setWeights(w)
-    weights = dataset.getWeights()
-    for wi in range(numWeights):
-        weights[wi] = 0.0
-    return dataset
-
-
-
-
-def gridSearchLearner(dataset, lowerBounds, upperBounds, nTestPoints):
-    assert dataset.__class__.lossType == 'flexible'
-    learnerCls = GridSearch_FlexibleLoss
-    learnerParamCls = GridSearch_FlexibleLossParameter
-
-    nr = numpy.require 
-    sizeT_type = 'uint64'
-
-    if struct.calcsize("P") * 8 == 32:
-        sizeT_type = 'uint32'
-
-    param = learnerParamCls(nr(lowerBounds,dtype='float64'), nr(upperBounds,dtype='float64'), 
-                           nr(nTestPoints,dtype=sizeT_type))
-
-    learner = learnerCls(dataset, param)
-    return learner
-
-
-def structPerceptron(dataset, learningMode='online',eps=1e-5, maxIterations=10000, stopLoss=0.0, decayExponent=0.0, decayT0=0.0):
-
-    assert dataset.__class__.lossType == 'flexible'
-    learnerCls = StructPerceptron_FlexibleLoss
-    learnerParamCls = StructPerceptron_FlexibleLossParameter
-    learningModeEnum = StructPerceptron_FlexibleLossParameter_LearningMode
-
-    lm = None
-    if learningMode not in ['online','batch']:
-        raise RuntimeError("wrong learning mode, must be 'online' or 'batch' ")
-
-    if learningMode == 'online':
-        lm = learningModeEnum.online
-    if learningMode == 'batch':
-        lm = learningModeEnum.batch
-
-    param = learnerParamCls()
-    param.eps = float(eps)
-    param.maxIterations = int(maxIterations)
-    param.stopLoss = float(stopLoss)
-    param.decayExponent = float(decayExponent)
-    param.decayT0 = float(decayT0)
-    param.learningMode = lm
-    learner = learnerCls(dataset, param)
-    return learner
-
-
-def rws(dataset,eps=1e-5, maxIterations=10000, stopLoss=0.0, learningRate=1.0, C=100.0, sigma=1.0, p=10):
-
-    assert dataset.__class__.lossType == 'flexible'
-    learnerCls = Rws_FlexibleLoss
-    learnerParamCls = Rws_FlexibleLossParameter
-
-
-    param = learnerParamCls()
-    param.eps = float(eps)
-    param.maxIterations = int(maxIterations)
-    param.stopLoss = float(stopLoss)
-    param.learningRate = float(learningRate)
-    param.C = float(C)
-    param.p = int(p)
-    param.sigma = float(sigma)
-    learner = learnerCls(dataset, param)
-    return learner
-
-
-
-def subgradientSSVM(dataset, learningMode='batch',eps=1e-5, maxIterations=10000, stopLoss=0.0, learningRate=1.0, C=100.0, averaging=-1, nConf=0):
-
-    assert dataset.__class__.lossType == 'flexible'
-    learnerCls = SubgradientSSVM_FlexibleLoss
-    learnerParamCls = SubgradientSSVM_FlexibleLossParameter
-    learningModeEnum = SubgradientSSVM_FlexibleLossParameter_LearningMode
-
-    lm = None
-    if learningMode not in ['online','batch']:
-        raise RuntimeError("wrong learning mode, must be 'online' or 'batch' ")
-
-    if learningMode == 'online':
-        lm = learningModeEnum.online
-    if learningMode == 'batch':
-        lm = learningModeEnum.batch
-    param = learnerParamCls()
-    param.eps = float(eps)
-    param.maxIterations = int(maxIterations)
-    param.stopLoss = float(stopLoss)
-    param.learningRate = float(learningRate)
-    param.C = float(C)
-    param.learningMode = lm
-    param.averaging = int(averaging)
-    param.nConf = int(nConf)
-    learner = learnerCls(dataset, param)
-    return learner
-
-def structMaxMarginLearner(dataset, regularizerWeight=1.0, minEps=1e-5, nSteps=0, epsStrategy='change', optimizer='bundle'):
-
-    if opengmConfig.withCplex or opengmConfig.withGurobi :
-        if optimizer != 'bundle':
-            raise RuntimeError("Optimizer type must be 'bundle' for now!")
-
-
-        assert dataset.__class__.lossType == 'flexible'
-        learnerCls = StructMaxMargin_Bundle_FlexibleLoss
-        learnerParamCls = StructMaxMargin_Bundle_FlexibleLossParameter
-
-        epsFromGap = False
-        if epsStrategy == 'gap':
-            epsFromGap = True
-        elif epsStrategy == 'change':
-            epsFromGap = False
-
-        param = learnerParamCls(regularizerWeight, minEps, nSteps, epsFromGap)
-        learner = learnerCls(dataset, param)
-        
-        return learner
-    else:
-        raise RuntimeError("this learner needs withCplex or withGurobi")
-
-
-def maxLikelihoodLearner(
-        dataset, 
-        maximumNumberOfIterations = 100,
-        gradientStepSize = 0.1,
-        weightStoppingCriteria = 0.00000001,
-        gradientStoppingCriteria = 0.00000000001,
-        infoFlag = True,
-        infoEveryStep = False,
-        weightRegularizer = 1.0,
-        beliefPropagationMaximumNumberOfIterations = 40,
-        beliefPropagationConvergenceBound = 0.0001,
-        beliefPropagationDamping = 0.5,
-        beliefPropagationReg = 1.0,
-        beliefPropagationTemperature = 1.0,
-        beliefPropagationIsAcyclic = Tribool(0)
-):
-
-    learnerCls = MaxLikelihood_FlexibleLoss
-    learnerParamCls = MaxLikelihood_FlexibleLossParameter
-
-    param = learnerParamCls(
-        maximumNumberOfIterations,
-        gradientStepSize,
-        weightStoppingCriteria,
-        gradientStoppingCriteria,
-        infoFlag,
-        infoEveryStep,
-        weightRegularizer,
-        beliefPropagationMaximumNumberOfIterations,
-        beliefPropagationConvergenceBound,
-        beliefPropagationDamping,
-        beliefPropagationTemperature,
-        beliefPropagationIsAcyclic
-    )
-    #param.maxIterations = int(maxIterations)
-    #param.reg = float(reg)
-    #param.temperature = float(temp)
-
-    learner = learnerCls(dataset, param)
-        
-    return learner
-
-
-
-
-
-def lUnaryFunction(weights, numberOfLabels, features, weightIds):
-
-    assert numberOfLabels >= 2
-    features = numpy.require(features, dtype=value_type)
-    weightIds = numpy.require(weightIds, dtype=index_type)
-
-    assert features.ndim == weightIds.ndim
-    if features.ndim == 1 or weightIds.ndim == 1:
-        assert numberOfLabels == 2
-        assert features.shape[0]  == weightIds.shape[0]
-        features  = features.reshape(1,-1)
-        weightIds = weightIds.reshape(1,-1)
-
-    assert features.shape[0] in [numberOfLabels, numberOfLabels-1]
-    assert weightIds.shape[0] in [numberOfLabels, numberOfLabels-1]
-    assert features.shape[1]  == weightIds.shape[1]
-
-
-    return LUnaryFunction(weights=weights, numberOfLabels=int(numberOfLabels), 
-                          features=features, weightIds=weightIds)
-
-
-
-
-class FeaturePolicy(object):
-    sharedBetweenLabels = 0
-
-def lUnaryFunctions(weights,numberOfLabels, features, weightIds,
-                    featurePolicy = FeaturePolicy.sharedBetweenLabels, 
-                    **kwargs):
-
-    if (featurePolicy == FeaturePolicy.sharedBetweenLabels ):
-
-        makeFirstEntryConst = kwargs.get('makeFirstEntryConst',False)
-        addConstFeature = kwargs.get('addConstFeature',False)
-
-
-        ff = numpy.require(features, dtype=value_type)
-        wid = numpy.require(weightIds, dtype=index_type)
-
-        assert features.ndim == 2
-        assert weightIds.ndim == 2
-
-
-        res = _lunarySharedFeatFunctionsGen(
-            weights = weights,
-            numFunctions = int(ff.shape[0]),
-            numLabels = int(numberOfLabels),
-            features = ff,
-            weightIds = wid,
-            makeFirstEntryConst = bool(makeFirstEntryConst),
-            addConstFeature = bool(addConstFeature)
-        )
-
-        res.__dict__['_features_'] =features
-        res.__dict__['_ff_'] = ff
-        res.__dict__['_weights_'] =  weights
-
-        return res
-    else:
-        raise RuntimeError("not yet implemented")
-
-def lPottsFunctions(weights, numberOfLabels, features, weightIds,
-                    addConstFeature = False):
-
-    # check that features has the correct shape
-    if features.ndim != 2:
-        raise RuntimeError("feature must be two-dimensional")
-
-    # check that weights has the correct shape
-    if weightIds.ndim != 1:
-        raise RuntimeError("weightIds must be one-dimensional")
-    if weightIds.shape[0] != features.shape[1] + int(addConstFeature):
-        raise RuntimeError("weightIds.shape[0] must be equal to features.shape[1] + int(addConstFeature)")
-
-
-
-    # do the c++ call here
-    # which generates a function generator
-
-
-    ff = numpy.require(features, dtype=value_type)
-    wid = numpy.require(weightIds, dtype=index_type)
-    res =  _lpottsFunctionsGen(
-        weights=weights,
-        numFunctions=long(features.shape[0]),
-        numLabels=long(numberOfLabels),
-        features=ff,
-        weightIds=wid,
-        addConstFeature=bool(addConstFeature)
-    )
-
-    # keep the referenced arrays and weights alive alongside the generator
-    res.__dict__['_features_'] = ff
-    res.__dict__['_weightIds_'] = wid
-    res.__dict__['_weights_'] = weights
-    return res
-
-
-
-
-
-
-
-# def getPbar(size, name):
-#     widgets = ['%s: '%name, Percentage(), ' ', Bar(marker='0',left='[',right=']'),
-#                ' ', ETA(), ' ', FileTransferSpeed()] #see docs for other options
-#     pbar = ProgressBar(widgets=widgets, maxval=size)
-#     return pbar
-
-def secondOrderImageDataset(imgs, gts, numberOfLabels, fUnary, fBinary, addConstFeature, trainFraction=0.75):
-    #try:
-    #    import vigra
-    #    from progressbar import *
-    #except:
-    #    pass
-
-    # train test
-    nImg = len(imgs)
-    nTrain = int(float(nImg)*trainFraction+0.5)
-    nTest = (nImg-nTrain)
-    
-    def getFeat(fComp, im):
-        res = []
-        for f in fComp:
-            r = f(im)
-            if r.ndim == 2:
-                r = r[:,:, None]
-            res.append(r)
-        return res
-
-    # compute features for a single image
-    tImg = imgs[0]
-    unaryFeat = getFeat(fUnary, tImg)
-    unaryFeat = numpy.nan_to_num(numpy.concatenate(unaryFeat,axis=2).view(numpy.ndarray))
-    nUnaryFeat = unaryFeat.shape[-1] + int(addConstFeature)
-    nUnaryFeat *= numberOfLabels - int(numberOfLabels==2)
-
-    if len(fBinary)>0:
-        binaryFeat = getFeat(fBinary, tImg)
-        binaryFeat = numpy.nan_to_num(numpy.concatenate(binaryFeat,axis=2).view(numpy.ndarray))
-        nBinaryFeat = binaryFeat.shape[-1] + int(addConstFeature)
-    else:
-        nBinaryFeat = 0
-    nWeights = nUnaryFeat + nBinaryFeat
-    print "------------------------------------------------"
-    print "nTrain",nTrain,"nTest",nTest
-    print "nWeights",nWeights,"(",nUnaryFeat,nBinaryFeat,")"
-    print "------------------------------------------------"
-
-    train_set = []
-    tentative_test_set = []
-
-    for i,(img,gt) in enumerate(zip(imgs,gts)):
-        if(i<nTrain):
-            train_set.append((img,gt))
-        else:
-            tentative_test_set.append((img,gt))
-
-
-    dataset = createDataset(numWeights=nWeights)
-    weights = dataset.getWeights()
-    uWeightIds = numpy.arange(nUnaryFeat ,dtype='uint64')
-    if numberOfLabels != 2:
-        uWeightIds = uWeightIds.reshape([numberOfLabels,-1])
-    else:
-        uWeightIds = uWeightIds.reshape([1,-1])
-    bWeightIds = numpy.arange(start=nUnaryFeat,stop=nWeights,dtype='uint64')
-
-    def makeModel(img,gt):
-        shape = gt.shape[0:2]
-        numVar = shape[0] * shape[1]
-
-        # make model
-        gm = graphicalModel(numpy.ones(numVar)*numberOfLabels)
-
-
-
-
-        # compute features
-        unaryFeat = getFeat(fUnary, img)
-        unaryFeat = numpy.nan_to_num(numpy.concatenate(unaryFeat,axis=2).view(numpy.ndarray))
-        unaryFeat  = unaryFeat.reshape([numVar,-1])
-        
-
-
-
-        # add unaries
-        lUnaries = lUnaryFunctions(weights =weights,numberOfLabels = numberOfLabels, 
-                                    features=unaryFeat, weightIds = uWeightIds,
-                                    featurePolicy= FeaturePolicy.sharedBetweenLabels,
-                                    makeFirstEntryConst=numberOfLabels==2, addConstFeature=addConstFeature)
-        fids = gm.addFunctions(lUnaries)
-        gm.addFactors(fids, numpy.arange(numVar))
-
-
-        if len(fBinary)>0:
-            binaryFeat = getFeat(fBinary, img)
-            binaryFeat = numpy.nan_to_num(numpy.concatenate(binaryFeat,axis=2).view(numpy.ndarray))
-            binaryFeat  = binaryFeat.reshape([numVar,-1])
-
-            # add second order
-            vis2Order=gridVis(shape[0:2],True)
-
-            fU = binaryFeat[vis2Order[:,0],:]
-            fV = binaryFeat[vis2Order[:,1],:]
-            fB = (fU + fV) / 2.0
-            
-            lp = lPottsFunctions(weights=weights, numberOfLabels=numberOfLabels,
-                                          features=fB, weightIds=bWeightIds,
-                                          addConstFeature=addConstFeature)
-            gm.addFactors(gm.addFunctions(lp), vis2Order) 
-
-        return gm
-
-    # make training models
-    # pbar = getPbar(nTrain,"Training Models")
-    # pbar.start()
-    for i,(img,gt) in enumerate(train_set):
-        gm = makeModel(img, gt)
-        dataset.pushBackInstance(gm,gt.reshape(-1).astype(label_type))
-        # pbar.update(i)
-    # pbar.finish()
-
-
-    # make test models
-    test_set = []
-    # pbar = getPbar(nTest,"Test Models")
-    # pbar.start()
-    for i,(img,gt) in enumerate(tentative_test_set):
-        gm = makeModel(img, gt)
-        test_set.append((img, gt, gm))
-    #     pbar.update(i)
-    # pbar.finish()
-
-    return dataset, test_set
-
-
-
-def superpixelDataset(imgs,sps, gts, numberOfLabels, fUnary, fBinary, addConstFeature, trainFraction=0.75):
-    try:
-        import vigra
-    except ImportError:
-        raise ImportError("cannot import vigra which is needed for superpixelDataset")
-
-    # train test
-    nImg = len(imgs)
-    nTrain = int(float(nImg)*trainFraction+0.5)
-    nTest = (nImg-nTrain)
-    
-    def getFeat(fComp, im, topoShape=False):
-        res = []
-        if(topoShape):
-            shape = im.shape[0:2]
-            tshape = [2*s-1 for s in shape]
-            iiimg = vigra.sampling.resize(im, tshape)
-        else:
-            iiimg = im
-        for f in fComp:
-            r = f(iiimg)
-            if r.ndim == 2:
-                r = r[:,:, None]
-            res.append(r)
-        return res
-
-    # compute features for a single image
-    tImg = imgs[0]
-    unaryFeat = getFeat(fUnary, tImg)
-    unaryFeat = numpy.nan_to_num(numpy.concatenate(unaryFeat,axis=2).view(numpy.ndarray))
-    nUnaryFeat = unaryFeat.shape[-1] + int(addConstFeature)
-    nUnaryFeat *= numberOfLabels - int(numberOfLabels==2)
-    if len(fBinary)>0:
-        binaryFeat = getFeat(fBinary, tImg)
-        binaryFeat = numpy.nan_to_num(numpy.concatenate(binaryFeat,axis=2).view(numpy.ndarray))
-        nBinaryFeat = binaryFeat.shape[-1] + int(addConstFeature)
-    else:
-        nBinaryFeat =0
-
-    nWeights  = nUnaryFeat + nBinaryFeat
-
-    print "------------------------------------------------"
-    print "nTrain",nTrain,"nTest",nTest
-    print "nWeights",nWeights,"(",nUnaryFeat,nBinaryFeat,")"
-    print "------------------------------------------------"
-
-    train_set = []
-    tentative_test_set = []
-
-    for i,(img,sp,gt) in enumerate(zip(imgs,sps,gts)):
-        if(i<nTrain):
-            train_set.append((img,sp,gt))
-        else:
-            tentative_test_set.append((img,sp,gt))
-
-
-    dataset = createDataset(numWeights=nWeights)
-    weights = dataset.getWeights()
-    uWeightIds = numpy.arange(nUnaryFeat ,dtype='uint64')
-    if numberOfLabels != 2:
-        uWeightIds = uWeightIds.reshape([numberOfLabels,-1])
-    else:
-        uWeightIds = uWeightIds.reshape([1,-1])
-
-    if len(fBinary)>0:
-        bWeightIds = numpy.arange(start=nUnaryFeat,stop=nWeights,dtype='uint64')
-
-
-
-
-
-    def makeModel(img,sp,gt):
-        assert sp.min() == 0
-        shape = img.shape[0:2]
-        gg = vigra.graphs.gridGraph(shape)
-        rag = vigra.graphs.regionAdjacencyGraph(gg,sp)
-        numVar = rag.nodeNum
-        assert rag.nodeNum == rag.maxNodeId +1
-
-        # make model
-        gm = graphicalModel(numpy.ones(numVar)*numberOfLabels)
-
-        assert gm.numberOfVariables == rag.nodeNum 
-        assert gm.numberOfVariables == rag.maxNodeId +1
-
-        # compute features
-        unaryFeat = getFeat(fUnary, img)
-        unaryFeat = numpy.nan_to_num(numpy.concatenate(unaryFeat,axis=2).view(numpy.ndarray)).astype('float32')
-        unaryFeat = vigra.taggedView(unaryFeat,'xyc')
-        accList = []
-
-        #for c in range(unaryFeat.shape[-1]):
-        #    cUnaryFeat = unaryFeat[:,:,c]
-        #    cAccFeat = rag.accumulateNodeFeatures(cUnaryFeat)[:,None]
-        #    accList.append(cAccFeat)
-        #accUnaryFeat = numpy.concatenate(accList,axis=1)
-        accUnaryFeat = rag.accumulateNodeFeatures(unaryFeat)#[:,None]
-
-
-        #print accUnaryFeat.shape
-
-        #accUnaryFeat = rag.accumulateNodeFeatures(unaryFeat[:,:,:])
-        #accUnaryFeat = vigra.taggedView(accUnaryFeat,'nc')
-        #accUnaryFeat = accUnaryFeat[1:accUnaryFeat.shape[0],:]
-
-      
-
-
-
-        #binaryFeat  = binaryFeat.reshape([numVar,-1])
-
-
-
-        # add unaries
-        lUnaries = lUnaryFunctions(weights =weights,numberOfLabels = numberOfLabels, 
-                                            features=accUnaryFeat, weightIds = uWeightIds,
-                                            featurePolicy= FeaturePolicy.sharedBetweenLabels,
-                                            makeFirstEntryConst=numberOfLabels==2, addConstFeature=addConstFeature)
-        fids = gm.addFunctions(lUnaries)
-        gm.addFactors(fids, numpy.arange(numVar))
-
-        
-        if len(fBinary)>0:
-            binaryFeat = getFeat(fBinary, img, topoShape=False)
-            binaryFeat = numpy.nan_to_num(numpy.concatenate(binaryFeat,axis=2).view(numpy.ndarray)).astype('float32')
-            edgeFeat = vigra.graphs.edgeFeaturesFromImage(gg, binaryFeat)
-            accBinaryFeat = rag.accumulateEdgeFeatures(edgeFeat)
-
-            uvIds =  numpy.sort(rag.uvIds(), axis=1)
-            assert uvIds.min()==0
-            assert uvIds.max()==gm.numberOfVariables-1
-
-
-
-        
-            lp = lPottsFunctions(weights=weights, numberOfLabels=numberOfLabels,
-                                          features=accBinaryFeat, weightIds=bWeightIds,
-                                          addConstFeature=addConstFeature)
-            fids = gm.addFunctions(lp)
-            gm.addFactors(fids, uvIds) 
-
-        return gm
-
-    # make training models
-    # pbar = getPbar(nTrain,"Training Models")
-    # pbar.start()
-    for i,(img,sp,gt) in enumerate(train_set):
-        gm = makeModel(img,sp, gt)
-        dataset.pushBackInstance(gm,gt.astype(label_type))
-        # pbar.update(i)
-    # pbar.finish()
-
-
-    # make test models
-    test_set = []
-    # pbar = getPbar(nTest,"Test Models")
-    # pbar.start()
-    for i,(img,sp,gt) in enumerate(tentative_test_set):
-        gm = makeModel(img,sp, gt)
-        test_set.append((img, sp, gm))
-    #     pbar.update(i)
-    # pbar.finish()
-
-    return dataset, test_set
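
Taken together, the two dataset helpers above supported a short end-to-end
training loop. A sketch, assuming imgs and gts are equal-length lists of
numpy arrays and that the chosen solver is exported for learning (the
feature functors and inference class are illustrative):

    import numpy
    import opengm
    from opengm import learning

    fUnary  = [lambda im: im]                  # functor: image -> feature map
    fBinary = [lambda im: numpy.abs(im)]

    dataset, test_set = learning.secondOrderImageDataset(
        imgs, gts, numberOfLabels=2,
        fUnary=fUnary, fBinary=fBinary, addConstFeature=True)

    learner = learning.subgradientSSVM(dataset, learningMode='batch', C=100.0)
    # learner.learn(infCls=opengm.inference.TrwsExternal)
    print dataset.getWeights()
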
diff --git a/src/interfaces/python/opengm/learning/helper.hxx b/src/interfaces/python/opengm/learning/helper.hxx
deleted file mode 100644
index e95034a..0000000
--- a/src/interfaces/python/opengm/learning/helper.hxx
+++ /dev/null
@@ -1,330 +0,0 @@
-#ifndef HELPER_HXX
-#define HELPER_HXX
-
-#include <boost/python.hpp>
-#include <boost/python/module.hpp>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-#include <opengm/python/numpyview.hxx>
-
-#include <opengm/inference/icm.hxx>
-#include <opengm/inference/lazyflipper.hxx>
-#include <opengm/inference/self_fusion.hxx>
-#include <opengm/learning/gridsearch-learning.hxx>
-#include <opengm/inference/messagepassing/messagepassing.hxx>
-
-#ifdef WITH_CPLEX
-#include <opengm/inference/lpcplex.hxx>
-#include <opengm/inference/multicut.hxx>
-#endif
-
-#ifdef WITH_QPBO
-#include <opengm/inference/external/qpbo.hxx>
-#include <opengm/inference/reducedinference.hxx>
-#endif
-
-#ifdef WITH_TRWS
-#include <opengm/inference/external/trws.hxx>
-#endif
-
-
-namespace opengm{
-
-template<class LEARNER>
-class LearnerInferenceSuite: public boost::python::def_visitor<LearnerInferenceSuite<LEARNER> >{
-public:
-    friend class boost::python::def_visitor_access;
-
-    LearnerInferenceSuite(){
-
-    }
-
-    template<class INF>
-    static void pyLearn_Inf(LEARNER & learner, const typename INF::Parameter & param)
-    {
-        learner. template learn<INF>(param);
-    }
-
-    #ifdef WITH_QPBO
-    template<class INF>
-    static void pyLearn_ReducedInf(
-        LEARNER & learner, 
-        const typename INF::Parameter & param,
-        const bool persistency,
-        const bool tentacles,
-        const bool connectedComponents
-    )
-    {
-
-        typedef typename INF::GraphicalModelType GmType;
-        typedef typename opengm::ReducedInferenceHelper<GmType>::InfGmType RedInfGm;
-
-        // rebind the inference to the RedInfGm
-        typedef typename INF:: template RebindGm<RedInfGm>::type RedInfRebindInf;
-
-
-        typedef typename RedInfRebindInf::Parameter RedInfRebindInfParam;
-        typedef opengm::ReducedInference<GmType, opengm::Minimizer, RedInfRebindInf> RedInf;
-        typedef typename RedInf::Parameter RedInfParam;
-
-        RedInfRebindInfParam redInfRebindInfParam(param);
-
-        RedInfParam redInfPara;
-        redInfPara.subParameter_ = redInfRebindInfParam;
-        redInfPara.Persistency_ = persistency;
-        redInfPara.Tentacle_ = tentacles;
-        redInfPara.ConnectedComponents_ = connectedComponents;
-
-        learner. template learn<RedInf>(redInfPara);
-    }
-    #endif
-
-
-    #ifdef WITH_QPBO
-    template<class INF>
-    static void pyLearn_ReducedInfSelfFusion(
-        LEARNER & learner, 
-        const typename INF::Parameter & param,
-        const bool persistency,
-        const bool tentacles,
-        const bool connectedComponents
-    )
-    {
-
-        typedef typename INF::GraphicalModelType GmType;
-        typedef typename opengm::ReducedInferenceHelper<GmType>::InfGmType RedInfGm;
-
-        // rebind the inference to the RedInfGm
-        typedef typename INF:: template RebindGm<RedInfGm>::type RedInfRebindInf;
-
-
-        typedef typename RedInfRebindInf::Parameter RedInfRebindInfParam;
-        typedef opengm::ReducedInference<GmType, opengm::Minimizer, RedInfRebindInf> RedInf;
-        typedef typename RedInf::Parameter RedInfParam;
-
-        RedInfRebindInfParam redInfRebindInfParam(param);
-
-        RedInfParam redInfPara;
-        redInfPara.subParameter_ = redInfRebindInfParam;
-        redInfPara.Persistency_ = persistency;
-        redInfPara.Tentacle_ = tentacles;
-        redInfPara.ConnectedComponents_ = connectedComponents;
-
-
-        typedef opengm::SelfFusion<RedInf> SelfFusionInf;
-        typedef typename SelfFusionInf::Parameter SelfFusionInfParam;
-        SelfFusionInfParam sfParam;
-
-        sfParam.infParam_ = redInfPara;
-        sfParam.fuseNth_ = 10;
-        sfParam.maxSubgraphSize_ = 2;
-        sfParam.reducedInf_ = true;
-        sfParam.tentacles_ = false;
-        sfParam.connectedComponents_ = true;
-        sfParam.fusionTimeLimit_ = 100.0;
-        sfParam.numStopIt_ = 10;
-        sfParam.fusionSolver_ = SelfFusionInf::QpboFusion;
-
-        learner. template learn<SelfFusionInf>(sfParam);
-    }
-    #endif
-
-
-    template<class INF>
-    static void pyLearn_SelfFusion(
-        LEARNER & learner, 
-        const typename INF::Parameter & param,
-        const size_t fuseNth,
-        const std::string & fusionSolver,
-        const UInt64Type maxSubgraphSize,
-        const bool reducedInf,
-        const bool connectedComponents,
-        const double fusionTimeLimit,
-        const size_t numStopIt
-    )
-    {
-
-        typedef typename INF::GraphicalModelType GmType;
-        
-        typedef opengm::SelfFusion<INF> SelfFusionInf;
-        typedef typename SelfFusionInf::Parameter SelfFusionInfParam;
-
-
-        SelfFusionInfParam sfParam;
-
-        if(fusionSolver ==std::string("qpbo")){
-            sfParam.fusionSolver_ = SelfFusionInf::QpboFusion;
-        }
-        else if(fusionSolver ==std::string("cplex")){
-            sfParam.fusionSolver_ = SelfFusionInf::CplexFusion;
-        }
-        else if(fusionSolver ==std::string("lf")){
-            sfParam.fusionSolver_ = SelfFusionInf::LazyFlipperFusion;
-        }
-
-        sfParam.infParam_ = param;
-        sfParam.fuseNth_ = fuseNth;
-        sfParam.maxSubgraphSize_ = maxSubgraphSize;
-        sfParam.reducedInf_ = reducedInf;
-        sfParam.tentacles_ = false;
-        sfParam.connectedComponents_ = connectedComponents;
-        sfParam.fusionTimeLimit_ = fusionTimeLimit;
-        sfParam.numStopIt_ = numStopIt;
-
-        learner. template learn<SelfFusionInf>(sfParam);
-    }
-
-
-
-
-
-
-
-    template <class classT>
-    void visit(classT& c) const{
-        // SOME INFERENCE METHODS
-        typedef typename LEARNER::GMType GMType;
-        typedef typename LEARNER::Parameter PyLearnerParam;
-        typedef typename LEARNER::DatasetType DatasetType;
-        typedef opengm::Minimizer ACC;
-
-        typedef opengm::ICM<GMType, ACC> IcmInf;
-        typedef opengm::LazyFlipper<GMType, ACC> LazyFlipperInf;
-        typedef opengm::BeliefPropagationUpdateRules<GMType, ACC> UpdateRulesType;
-        typedef opengm::MessagePassing<GMType, ACC, UpdateRulesType, opengm::MaxDistance> BpInf;
-
-        #ifdef WITH_CPLEX
-            typedef opengm::LPCplex<GMType, ACC> Cplex;
-            typedef opengm::Multicut<GMType, ACC> Multicut;
-        #endif
-
-        #ifdef WITH_QPBO
-            typedef opengm::external::QPBO<GMType>  QpboExternal;
-        #endif
-
-        #ifdef WITH_TRWS
-            typedef opengm::external::TRWS<GMType>  TrwsExternal;
-        #endif
-
-        c
-            //.def("_learn",&pyLearn_Inf<IcmInf>)
-            //.def("_learn",&pyLearn_Inf<LazyFlipperInf>)
-            //.def("_learn",&pyLearn_Inf<BpInf>)
-            #ifdef WITH_CPLEX
-            //.def("_learn",&pyLearn_Inf<Cplex>) 
-            .def("_learn",&pyLearn_Inf<Multicut>)
-            #endif
-            #ifdef WITH_QPBO
-            .def("_learn",&pyLearn_Inf<QpboExternal>)
-            #endif
-            #ifdef WITH_TRWS
-            .def("_learn",&pyLearn_Inf<TrwsExternal>)
-            #endif
-
-            #if 0
-            // REDUCED INFERENCE
-            #ifdef WITH_QPBO
-                .def("_learnReducedInf",&pyLearn_ReducedInf<LazyFlipperInf>)
-                #ifdef WITH_TRWS
-                .def("_learnReducedInf",&pyLearn_ReducedInf<TrwsExternal>)
-                #endif
-                #ifdef WITH_CPLEX
-                .def("_learnReducedInf",&pyLearn_ReducedInf<Cplex>)
-                #endif
-            #endif
-
-            // SELF FUSION
-            #ifdef WITH_TRWS
-            .def("_learnSelfFusion",&pyLearn_SelfFusion<TrwsExternal>)
-            #endif
-
-            // REDUCED INFERENCE SELF FUSION
-            #if defined(WITH_TRWS) && defined(WITH_QPBO)
-            .def("_learnReducedInfSelfFusion",&pyLearn_ReducedInfSelfFusion<TrwsExternal>)
-            #endif
-            #endif
-        ;
-    }
-};
-
-
-
-template<class DS>
-class DatasetInferenceSuite: public boost::python::def_visitor<DatasetInferenceSuite<DS> >{
-public:
-   friend class boost::python::def_visitor_access;
-
-   DatasetInferenceSuite(){
-
-   }
-
-   template<class INF>
-   static typename DS::ValueType pyGetLossWithInf(DS & ds, const typename INF::Parameter & param, const size_t i)
-   {
-       return ds. template getLoss<INF>(param, i);
-   }
-
-   template<class INF>
-   static typename DS::ValueType pyGetTotalLossWithInf(DS & ds, const typename INF::Parameter & param)
-   {
-       return ds. template getTotalLoss<INF>(param);
-   }
-
-   template <class classT>
-   void visit(classT& c) const{
-       // SOME INFERENCE METHODS
-       typedef typename DS::GMType GMType;
-       typedef opengm::Minimizer ACC;
-
-       typedef opengm::ICM<GMType, ACC> IcmInf;
-       typedef opengm::LazyFlipper<GMType, ACC> LazyFlipperInf;
-       typedef opengm::BeliefPropagationUpdateRules<GMType, ACC> UpdateRulesType;
-       typedef opengm::MessagePassing<GMType, ACC, UpdateRulesType, opengm::MaxDistance> BpInf;
-
-#ifdef WITH_CPLEX
-       typedef opengm::LPCplex<GMType, ACC> Cplex;
-       typedef opengm::Multicut<GMType, ACC> Multicut;
-#endif
-#ifdef WITH_QPBO
-       typedef opengm::external::QPBO<GMType>  QpboExternal;
-#endif
-#ifdef WITH_TRWS
-       typedef opengm::external::TRWS<GMType>  TrwsExternal;
-#endif
-
-
-
-
-
-
-      c
-          .def("_getLoss",&pyGetLossWithInf<IcmInf>)
-          .def("_getTotalLoss",&pyGetTotalLossWithInf<IcmInf>)
-          .def("_getLoss",&pyGetLossWithInf<LazyFlipperInf>)
-          .def("_getTotalLoss",&pyGetTotalLossWithInf<LazyFlipperInf>)
-          .def("_getLoss",&pyGetLossWithInf<BpInf>)
-          .def("_getTotalLoss",&pyGetTotalLossWithInf<BpInf>)
-#ifdef WITH_CPLEX
-          .def("_getLoss",&pyGetLossWithInf<Cplex>)
-          .def("_getTotalLoss",&pyGetTotalLossWithInf<Cplex>)
-          .def("_getLoss",&pyGetLossWithInf<Multicut>)
-          .def("_getTotalLoss",&pyGetTotalLossWithInf<Multicut>)
-#endif
-#ifdef WITH_QPBO
-          .def("_getLoss",&pyGetLossWithInf<QpboExternal>)
-          .def("_getTotalLoss",&pyGetTotalLossWithInf<QpboExternal>)
-#endif
-#ifdef WITH_TRWS
-          .def("_getLoss",&pyGetLossWithInf<TrwsExternal>)
-          .def("_getTotalLoss",&pyGetTotalLossWithInf<TrwsExternal>)
-#endif
-      ;
-   }
-};
-
-
-
-} // namespace opengm
-
-#endif // HELPER_HXX
-
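
The visitor above registers one _learn overload per inference algorithm, so
dispatch from Python happens purely on the C++ parameter type. That is why
the wrappers in the deleted __init__.py first convert a generic InfParam
into the solver-specific parameter (sketch; infCls and learner as in those
wrappers):

    import opengm

    param    = opengm.InfParam()   # generic key/value parameter holder
    cppParam = infCls.get_cpp_parameter(operator='adder',
                                        accumulator='minimizer',
                                        parameter=param)
    learner._learn(cppParam)       # Boost.Python picks the overload whose
                                   # Parameter type matches, otherwise it
                                   # raises "did not match C++ signature"
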
diff --git a/src/interfaces/python/opengm/learning/learning.cxx b/src/interfaces/python/opengm/learning/learning.cxx
deleted file mode 100644
index 195b2ac..0000000
--- a/src/interfaces/python/opengm/learning/learning.cxx
+++ /dev/null
@@ -1,91 +0,0 @@
-#include <boost/python.hpp>
-#include <stddef.h>
-#include <boost/python/suite/indexing/vector_indexing_suite.hpp>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-#include <opengm/python/numpyview.hxx>
-#include <opengm/python/pythonfunction.hxx>
-
-#include <opengm/learning/loss/hammingloss.hxx>
-#include <opengm/learning/loss/generalized-hammingloss.hxx>
-#include <opengm/learning/loss/noloss.hxx>
-#include <opengm/learning/loss/flexibleloss.hxx>
-
-#if defined(WITH_CPLEX) || defined(WITH_GUROBI)
-#include <opengm/learning/bundle-optimizer.hxx>
-#endif
-
-
-namespace bp = boost::python;
-namespace op = opengm::python;
-namespace ol = opengm::learning;
-
-namespace opengm{
-
-    void export_weights();
-    void export_weight_constraints();
-
-    template<class GM, class LOSS>
-    void export_dataset(const std::string& className);
-
-    template<class GM>
-    void export_loss();
-
-    template<class DATASET>
-    void export_grid_search_learner(const std::string & clsName);
-
-    template<class DATASET, class OPTIMIZER>
-    void export_struct_max_margin_bundle_learner(const std::string & clsName);
-
-    //template<class DATASET>
-    //void export_max_likelihood_learner(const std::string & clsName);
-
-    template<class DATASET>
-    void export_struct_perceptron_learner(const std::string & clsName);
-
-    template<class DATASET>
-    void export_subgradient_ssvm_learner(const std::string & clsName);
-
-    template<class DATASET>
-    void export_rws_learner(const std::string & clsName);
-
-    template<class GM_ADDER,class GM_MULT>  
-    void export_lfunction_generator();
-
-
-}
-
-
-
-BOOST_PYTHON_MODULE_INIT(_learning) {
-
-
-    Py_Initialize();
-    PyEval_InitThreads();
-    bp::numeric::array::set_module_and_type("numpy", "ndarray");
-    bp::docstring_options doc_options(true,true,false);
-
-
-    opengm::export_weights();
-    opengm::export_weight_constraints();
-    // function exporter
-    opengm::export_lfunction_generator<op::GmAdder,op::GmMultiplier>();
-
-    // export loss
-    opengm::export_loss<op::GmAdder>();
-
-    // templated datasets
-    opengm::export_dataset<op::GmAdder, ol::FlexibleLoss >("DatasetWithFlexibleLoss");
-
-
-
-    opengm::export_grid_search_learner<op::GmAdderFlexibleLossDataset>("GridSearch_FlexibleLoss");
-    opengm::export_struct_perceptron_learner<op::GmAdderFlexibleLossDataset>("StructPerceptron_FlexibleLoss");
-    opengm::export_subgradient_ssvm_learner<op::GmAdderFlexibleLossDataset>("SubgradientSSVM_FlexibleLoss");
-    //opengm::export_max_likelihood_learner<op::GmAdderFlexibleLossDataset>("MaxLikelihood_FlexibleLoss");
-    opengm::export_rws_learner<op::GmAdderFlexibleLossDataset>("Rws_FlexibleLoss");
-    
-    #if defined(WITH_CPLEX) || defined(WITH_GUROBI)
-        opengm::export_struct_max_margin_bundle_learner< op::GmAdderFlexibleLossDataset, ol::BundleOptimizer<op::GmValueType> >("StructMaxMargin_Bundle_FlexibleLoss");
-    #endif
-}
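
The module init above fixes exactly which names the deleted __init__.py
could import; built without CPLEX or Gurobi, the bundle learner is simply
absent. An import-level summary (sketch):

    from _learning import (
        Weights,                        # export_weights
        DatasetWithFlexibleLoss,        # export_dataset<GmAdder, FlexibleLoss>
        GridSearch_FlexibleLoss,        # export_grid_search_learner
        StructPerceptron_FlexibleLoss,  # export_struct_perceptron_learner
        SubgradientSSVM_FlexibleLoss,   # export_subgradient_ssvm_learner
        Rws_FlexibleLoss,               # export_rws_learner
    )
    # StructMaxMargin_Bundle_FlexibleLoss exists only when compiled
    # WITH_CPLEX or WITH_GUROBI
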
diff --git a/src/interfaces/python/opengm/learning/pyDataset.cxx b/src/interfaces/python/opengm/learning/pyDataset.cxx
deleted file mode 100644
index 5d8068a..0000000
--- a/src/interfaces/python/opengm/learning/pyDataset.cxx
+++ /dev/null
@@ -1,104 +0,0 @@
-#include <boost/python.hpp>
-#include <boost/python/suite/indexing/vector_indexing_suite.hpp>
-#include <stdexcept>
-#include <stddef.h>
-
-#include <opengm/learning/dataset/editabledataset.hxx>
-#include <opengm/learning/dataset/dataset_io.hxx>
-#include <opengm/learning/loss/hammingloss.hxx>
-#include <opengm/learning/loss/generalized-hammingloss.hxx>
-#include <opengm/learning/loss/noloss.hxx>
-#include <opengm/python/numpyview.hxx>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-
-#define DefaultErrorFn DefaultErrorFn_TrwsExternal_DS
-#include "helper.hxx"
-
-using namespace boost::python;
-
-namespace opengm{
-
-template<class GM, class LOSS>
-void pySetInstanceWithLossParam(opengm::datasets::EditableDataset<GM, LOSS>& ds,
-                   const size_t i,
-                   const GM& gm,
-                   const opengm::python::NumpyView<typename GM::LabelType,1>  gt,
-                   const typename LOSS::Parameter & param) {
-    std::vector<typename GM::LabelType> gt_vector(gt.begin(), gt.end());
-    ds.setInstance(i, gm, gt_vector, param);
-}
-
-template<class GM, class LOSS>
-void pySetInstance(opengm::datasets::EditableDataset<GM, LOSS>& ds,
-                   const size_t i,
-                   const GM& gm,
-                   const opengm::python::NumpyView<typename GM::LabelType,1>& gt
-                   ) {
-    pySetInstanceWithLossParam(ds, i, gm, gt, typename LOSS::Parameter());
-}
-
-template<class GM, class LOSS>
-void pyPushBackInstanceWithLossParam(opengm::datasets::EditableDataset<GM,LOSS>& ds,
-                        const GM& gm,
-                        const opengm::python::NumpyView<typename GM::LabelType,1>& gt,
-                        const typename LOSS::Parameter & param) {
-    std::vector<typename GM::LabelType> gt_vector(gt.begin(), gt.end());
-    ds.pushBackInstance(gm, gt_vector, param);
-}
-
-template<class GM, class LOSS>
-void pyPushBackInstance(opengm::datasets::EditableDataset<GM,LOSS>& ds,
-                        const GM& gm,
-                        const opengm::python::NumpyView<typename GM::LabelType,1>& gt
-                        ) {
-    pyPushBackInstanceWithLossParam(ds, gm, gt, typename LOSS::Parameter());
-}
-
-template<class GM, class LOSS>
-void pySaveDataset(opengm::datasets::EditableDataset<GM,LOSS >& ds,
-                   const std::string datasetpath,
-                   const std::string prefix) {
-    opengm::datasets::DatasetSerialization::save(ds, datasetpath, prefix);
-}
-
-template<class GM, class LOSS>
-void pyLoadDataset(opengm::datasets::EditableDataset<GM,LOSS >& ds,
-                   const std::string datasetpath,
-                   const std::string prefix) {
-    opengm::datasets::DatasetSerialization::loadAll(datasetpath, prefix, ds);
-}
-
-template<class GM, class LOSS>
-void export_dataset(const std::string& className){
-    typedef opengm::datasets::EditableDataset<GM,LOSS > PyDataset;
-
-   class_<PyDataset > (className.c_str(),init<size_t>())
-           .def("lockModel", &PyDataset::lockModel)
-           .def("unlockModel", &PyDataset::unlockModel)
-           .def("getModel", &PyDataset::getModel, return_internal_reference<>())
-           .def("getModelWithLoss", &PyDataset::getModelWithLoss, return_internal_reference<>())
-           .def("getGT", &PyDataset::getGT, return_internal_reference<>())
-           .def("getWeights", &PyDataset::getWeights, return_internal_reference<>())
-           .def("getNumberOfWeights", &PyDataset::getNumberOfWeights)
-           .def("getNumberOfModels", &PyDataset::getNumberOfModels)
-           .def("setInstance", &pySetInstance<GM,LOSS>)
-           .def("setInstanceWithLossParam", &pySetInstanceWithLossParam<GM,LOSS>)
-           .def("setInstance", &pySetInstanceWithLossParam<GM,LOSS>)
-           .def("pushBackInstance", &pyPushBackInstance<GM,LOSS>)
-           .def("pushBackInstanceWithLossParam", &pyPushBackInstanceWithLossParam<GM,LOSS>)
-           .def("pushBackInstance", &pyPushBackInstanceWithLossParam<GM,LOSS>)
-           .def("setWeights", &PyDataset::setWeights)
-           .def("save", &pySaveDataset<GM, LOSS>)
-           .def("load", &pyLoadDataset<GM, LOSS>)
-           .def(DatasetInferenceSuite<PyDataset>())
-   ;
-
-}
-
-
-//template void export_dataset<opengm::python::GmAdder, opengm::learning::HammingLoss> (const std::string& className);
-//template void export_dataset<opengm::python::GmAdder, opengm::learning::NoLoss> (const std::string& className);
-template void export_dataset<opengm::python::GmAdder, opengm::learning::FlexibleLoss> (const std::string& className);
-
-}
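
export_dataset above defines the Python surface of DatasetWithFlexibleLoss;
a sketch of the resulting API (paths and prefix are illustrative, storage
goes through opengm::datasets::DatasetSerialization):

    ds = DatasetWithFlexibleLoss(0)            # argument: numInstances
    ds.setWeights(Weights(3))
    # ds.pushBackInstance(gm, gt)              # gt: 1-D numpy label array
    # ds.pushBackInstance(gm, gt, lossParam)   # overload with loss parameter
    ds.save("/tmp/dataset", "train_")
    ds2 = DatasetWithFlexibleLoss(0)
    ds2.load("/tmp/dataset", "train_")
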
diff --git a/src/interfaces/python/opengm/learning/pyGridSearchLearner.cxx b/src/interfaces/python/opengm/learning/pyGridSearchLearner.cxx
deleted file mode 100644
index 412117c..0000000
--- a/src/interfaces/python/opengm/learning/pyGridSearchLearner.cxx
+++ /dev/null
@@ -1,64 +0,0 @@
-#include <boost/python.hpp>
-#include <boost/python/module.hpp>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-#include <opengm/python/numpyview.hxx>
-
-#define DefaultErrorFn DefaultErrorFn_TrwsExternalB
-#include "helper.hxx"
-
-namespace bp = boost::python;
-namespace op = opengm::python;
-namespace ol = opengm::learning;
-
-namespace opengm{
-
-
-    template<class PARAM>
-    PARAM * pyGridSearchParamConstructor(
-        op::NumpyView<double> lowerBound,
-        op::NumpyView<double> upperBound,
-        op::NumpyView<size_t> nTestPoints
-    ){
-        PARAM * p  = new PARAM();
-        p->parameterLowerbound_.assign(lowerBound.begin(), lowerBound.end());
-        p->parameterUpperbound_.assign(upperBound.begin(), upperBound.end());
-        p->testingPoints_.assign(nTestPoints.begin(), nTestPoints.end());
-        return p;
-    }
-
-    template<class L >
-    L * pyGridSearchConstructor(
-        typename L::DatasetType & dataset,
-        const typename L::Parameter & param
-    ){
-        L * l  = new L(dataset, param);
-        return l;
-    }
-
-    template<class DATASET>
-    void export_grid_search_learner(const std::string & clsName){
-        typedef learning::GridSearchLearner<DATASET> PyLearner;
-        typedef typename PyLearner::Parameter PyLearnerParam;
-
-        const std::string paramClsName = clsName + std::string("Parameter");
-
-
-        bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
-            .def("__init__", make_constructor(&pyGridSearchParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
-        ;
-
-        bp::class_<PyLearner>( clsName.c_str(), bp::no_init )
-        .def("__init__", make_constructor(&pyGridSearchConstructor<PyLearner> ,boost::python::default_call_policies()))
-        .def(LearnerInferenceSuite<PyLearner>())
-        ;
-    }
-
-    template void 
-    export_grid_search_learner<op::GmAdderFlexibleLossDataset> (const std::string& className);
-
-    //template void 
-    //export_grid_search_learner<op::GmAdderGeneralizedHammingLossDataset> (const std::string& className);
-}
-
-
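
The parameter constructor above takes three numpy arrays; the deleted
gridSearchLearner wrapper documents the expected dtypes (float64 bounds and
size_t test-point counts, i.e. uint32 on 32-bit builds):

    import numpy, struct

    sizeT = 'uint32' if struct.calcsize("P") * 8 == 32 else 'uint64'
    param = GridSearch_FlexibleLossParameter(
        numpy.require([0.0, 0.0], dtype='float64'),  # lower bounds
        numpy.require([1.0, 1.0], dtype='float64'),  # upper bounds
        numpy.require([10, 10], dtype=sizeT))        # test points per weight
    learner = GridSearch_FlexibleLoss(dataset, param)
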
diff --git a/src/interfaces/python/opengm/learning/pyLFunctionGen.cxx b/src/interfaces/python/opengm/learning/pyLFunctionGen.cxx
deleted file mode 100644
index e8bb186..0000000
--- a/src/interfaces/python/opengm/learning/pyLFunctionGen.cxx
+++ /dev/null
@@ -1,309 +0,0 @@
-#include <boost/python.hpp>
-#include <boost/python/module.hpp>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-#include <opengm/python/numpyview.hxx>
-
-
-#include "opengm/graphicalmodel/weights.hxx"
-#include "opengm/functions/learnable/lpotts.hxx"
-#include "opengm/functions/learnable/lunary.hxx"
-#include "opengm/functions/learnable/lweightedsum_of_functions.hxx"
-
-#include "../opengmcore/functionGenBase.hxx"
-
-
-namespace bp = boost::python;
-namespace op = opengm::python;
-namespace ol = opengm::learning;
-namespace ofl = opengm::functions::learnable;
-namespace opengm{
-
-
-
-    template<class GM_ADDER,class GM_MULT>
-    class LPottsFunctionGen :
-    public FunctionGeneratorBase<GM_ADDER,GM_MULT>
-    {
-    public:       
-        typedef typename GM_ADDER::ValueType ValueType;
-        typedef typename GM_ADDER::IndexType IndexType;
-        typedef typename GM_ADDER::LabelType LabelType;
-        typedef ol::Weights<ValueType> WeightType;
-        typedef  ofl::LPotts<ValueType, IndexType, LabelType> FType;
-
-        LPottsFunctionGen(
-            WeightType & weights,
-            const size_t numFunctions,
-            const size_t numLabels,
-            op::NumpyView<ValueType, 2> features,
-            op::NumpyView<IndexType, 1> weightIds,
-            const bool addConstFeature
-        ):
-        FunctionGeneratorBase<GM_ADDER,GM_MULT>(),
-        weights_(weights),
-        numFunctions_(numFunctions),
-        numLabels_(numLabels),
-        features_(features.view()),
-        weightIds_(weightIds.begin(), weightIds.end()),
-        addConstFeature_(addConstFeature)
-        {
-            OPENGM_CHECK_OP(features.shape(0), == , numFunctions, "wrong shape");
-            OPENGM_CHECK_OP(features.shape(1)+int(addConstFeature), == , weightIds.shape(0), "wrong shape");
-        }
- 
-
-        template<class GM>
-        std::vector< typename GM::FunctionIdentifier > * addFunctionsGeneric(GM & gm)const{
-
-            typedef typename GM::FunctionIdentifier Fid;
-            typedef std::vector<Fid> FidVector;
-            FidVector * fidVector = new FidVector(numFunctions_);
-
-            const size_t nFeat =features_.shape(1);
-            std::vector<ValueType> fFeat(nFeat+int(addConstFeature_));
-            for(size_t  i=0;i<numFunctions_;++i){
-                for(size_t f=0; f<nFeat; ++f){
-                    fFeat[f] = features_(i,f);
-                }
-                if(addConstFeature_){
-                    fFeat[nFeat] = 1.0;
-                }
-                const FType f(weights_, numLabels_, weightIds_, fFeat);
-                (*fidVector)[i] = gm.addFunction(f);
-            }   
-            return fidVector;
-        }
-
-        virtual std::vector< typename GM_ADDER::FunctionIdentifier > * addFunctions(GM_ADDER & gm)const{
-            return this-> template addFunctionsGeneric<GM_ADDER>(gm);
-        }
-        virtual std::vector< typename GM_MULT::FunctionIdentifier >  * addFunctions(GM_MULT & gm)const{
-            throw RuntimeError("Wrong Operator for Learning");
-            return NULL;
-        }
-    private:
-        WeightType & weights_;
-        size_t numFunctions_;
-        size_t numLabels_;
-        marray::Marray<ValueType>  features_;
-        std::vector<size_t>  weightIds_; 
-        bool addConstFeature_;
-    };
-
-
-
-    template<class GM_ADDER,class GM_MULT>
-    class LUnarySharedFeatFunctionGen :
-    public FunctionGeneratorBase<GM_ADDER,GM_MULT>
-    {
-    public:       
-        typedef typename GM_ADDER::ValueType ValueType;
-        typedef typename GM_ADDER::IndexType IndexType;
-        typedef typename GM_ADDER::LabelType LabelType;
-        typedef ol::Weights<ValueType> WeightType;
-        typedef  ofl::LUnary<ValueType, IndexType, LabelType> FType;
-
-        LUnarySharedFeatFunctionGen(
-            WeightType & weights,
-            const size_t numFunctions,
-            const size_t numLabels,
-            op::NumpyView<ValueType, 2> & features,
-            op::NumpyView<IndexType, 2> & weightIds,
-            const bool makeFirstEntryConst,
-            const bool addConstFeature
-        ):
-        FunctionGeneratorBase<GM_ADDER,GM_MULT>(),
-        weights_(weights),
-        numFunctions_(numFunctions),
-        numLabels_(numLabels),
-        features_(features.view()),
-        makeFirstEntryConst_(makeFirstEntryConst),
-        addConstFeature_(addConstFeature)
-        {
-            //std::cout<<"constructor\n";
-
-            //std::cout<<"    features (1000,1)"<<features(1000,1)<<"\n";
-            //std::cout<<"    features_(1000,1)"<<features_(1000,1)<<"\n";
-            OPENGM_CHECK_OP(features.shape(0), == , numFunctions, "features must have one row per function");
-            OPENGM_CHECK_OP(weightIds.shape(1), == , features.shape(1) + int(addConstFeature), "one weight id is needed per feature (plus one for the constant feature)");
-            OPENGM_CHECK_OP(weightIds.shape(0)+int(makeFirstEntryConst), == ,numLabels, "one row of weight ids is needed per (non-constant) label");
-
-
-            const size_t nFeat =features_.shape(1);
-            const size_t nWPerL = nFeat+int(addConstFeature_);
-            const size_t wShape[2] = {numLabels_- int(makeFirstEntryConst_) ,nWPerL};
-
-            wIds_ = marray::Marray<size_t>(wShape, wShape+2);
-
-            //std::cout<<"assignment\n";
-            //std::cout<<"passed wi shape "<<weightIds.shape(0)<<" "<<weightIds.shape(1)<<" given "<<wShape[0]<<" "<<wShape[1]<<"\n";
-            //std::cout<<"wIds_  shape "<<wIds_.shape(0)<<" "<<wIds_.shape(1)<<"\n";
-
-            for(size_t ll=0; ll<wShape[0]; ++ll){
-                for(size_t wi=0; wi<wShape[1]; ++wi){
-                    //std::cout<<"ll "<<ll<<" wi "<<wi<<"\n";
-                    size_t passed =  weightIds(ll,wi);
-                    //std::cout<<"passed "<<passed<<"\n";
-                    wIds_(ll,wi) = passed;
-                }  
-            }
-            //std::cout<<"constructor done\n";
-        }
- 
-
-        template<class GM>
-        std::vector< typename GM::FunctionIdentifier > * addFunctionsGeneric(GM & gm)const{
-            //std::cout<<"&** features_(1000,1)"<<features_(1000,1)<<"\n";
-
-
-
-            typedef typename GM::FunctionIdentifier Fid;
-            typedef std::vector<Fid> FidVector;
-            FidVector * fidVector = new FidVector(numFunctions_);
-
-
-            const size_t nFeat =features_.shape(1);
-            const size_t nWPerL = nFeat+int(addConstFeature_);
-            marray::Marray<ValueType> fFeat(&nWPerL,&nWPerL+1);
-
-
-
-            for(size_t  i=0;i<numFunctions_;++i){
-                // copy the features for that instance
-                for(size_t f=0; f<nFeat; ++f){
-                    //std::cout<<"added feat:"<<features_(i,f)<<"\n";
-                    fFeat(f) = features_(i,f);
-                }
-                if(addConstFeature_){
-                    fFeat(nFeat) = 1.0;
-                }
-                FType f(weights_, numLabels_, wIds_, fFeat, makeFirstEntryConst_);
-
-                (*fidVector)[i] = gm.addFunction(f);
-            }   
-            return fidVector;
-        }
-
-        virtual std::vector< typename GM_ADDER::FunctionIdentifier > * addFunctions(GM_ADDER & gm)const{
-            return this-> template addFunctionsGeneric<GM_ADDER>(gm);
-        }
-        virtual std::vector< typename GM_MULT::FunctionIdentifier >  * addFunctions(GM_MULT & gm)const{
-            throw RuntimeError("Wrong Operator for Learning");
-            return NULL;
-        }
-    private:
-        WeightType & weights_;
-        size_t numFunctions_;
-        size_t numLabels_;
-
-        marray::Marray<ValueType> features_;
-        bool makeFirstEntryConst_;
-        bool addConstFeature_;
-        marray::Marray<size_t> wIds_;
-    };
-
-
-    template<class GM_ADDER,class GM_MULT>
-    FunctionGeneratorBase<GM_ADDER,GM_MULT> * lunarySharedFeatFunctionGen(
-        ol::Weights<typename GM_ADDER::ValueType> & weights,
-        const size_t numFunctions,
-        const size_t numLabels,
-        opengm::python::NumpyView<typename GM_ADDER::ValueType,2> features,
-        opengm::python::NumpyView<typename GM_ADDER::IndexType,2> weightIds,
-        const bool makeFirstEntryConst,
-        const bool addConstFeature
-    ){
-        FunctionGeneratorBase<GM_ADDER,GM_MULT> * ptr = 
-            new LUnarySharedFeatFunctionGen<GM_ADDER,GM_MULT>(weights,numFunctions,numLabels,
-                                                              features,weightIds,makeFirstEntryConst,
-                                                              addConstFeature);
-        return ptr;
-    }
-
-
-    template<class GM_ADDER,class GM_MULT>
-    FunctionGeneratorBase<GM_ADDER,GM_MULT> * lpottsFunctionGen(
-        ol::Weights<typename GM_ADDER::ValueType> & weights,
-        const size_t numFunctions,
-        const size_t numLabels,
-        opengm::python::NumpyView<typename GM_ADDER::ValueType,2> features,
-        opengm::python::NumpyView<typename GM_ADDER::IndexType,1> weightIds,
-        const bool addConstFeature
-    ){
-        FunctionGeneratorBase<GM_ADDER,GM_MULT> * ptr = 
-            new LPottsFunctionGen<GM_ADDER,GM_MULT>(weights,numFunctions,numLabels,features,weightIds, addConstFeature);
-        return ptr;
-    }
-
-
-
-    template<class GM_ADDER,class GM_MULT>  
-    void export_lfunction_generator(){
-        typedef LPottsFunctionGen<GM_ADDER, GM_MULT> FGen;
-
-         bp::def("_lpottsFunctionsGen",&lpottsFunctionGen<GM_ADDER,GM_MULT>,
-                bp::return_value_policy<bp::manage_new_object>(),
-            (
-                bp::arg("weights"),
-                bp::arg("numFunctions"),
-                bp::arg("numLabels"),
-                bp::arg("features"),
-                bp::arg("weightIds"),
-                bp::arg("addConstFeature")
-            )
-        );
-
-         bp::def("_lunarySharedFeatFunctionsGen",&lunarySharedFeatFunctionGen<GM_ADDER,GM_MULT>,
-                bp::with_custodian_and_ward_postcall<0, 4, bp::return_value_policy<bp::manage_new_object> >(),
-            (
-                bp::arg("weights"),
-                bp::arg("numFunctions"),
-                bp::arg("numLabels"),
-                bp::arg("features"),
-                bp::arg("weightIds"),
-                bp::arg("makeFirstEntryConst"),
-                bp::arg("addConstFeature")
-            )
-        );
-
-    }
-
-}
-
-
-template void opengm::export_lfunction_generator<op::GmAdder,op::GmMultiplier>();
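For orientation, the shape contract that the two generator bindings above enforced (via the OPENGM_CHECK_OP asserts) can be written down in plain numpy. This sketch is illustrative only; the sizes are made up:

    import numpy as np

    numFunctions, nFeat, numLabels = 100, 4, 3
    features = np.random.rand(numFunctions, nFeat)   # one feature row per function

    # _lpottsFunctionsGen (addConstFeature=True): a 1-D array with one weight
    # id per feature, plus one id for the appended constant feature.
    pottsWeightIds = np.arange(nFeat + 1, dtype=np.uint64)
    assert pottsWeightIds.shape[0] == features.shape[1] + 1

    # _lunarySharedFeatFunctionsGen (makeFirstEntryConst=True, addConstFeature=True):
    # one row of weight ids per non-constant label.
    unaryWeightIds = np.arange((numLabels - 1) * (nFeat + 1),
                               dtype=np.uint64).reshape(numLabels - 1, nFeat + 1)
    assert unaryWeightIds.shape[0] + 1 == numLabels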
diff --git a/src/interfaces/python/opengm/learning/pyLoss.cxx b/src/interfaces/python/opengm/learning/pyLoss.cxx
deleted file mode 100644
index 951559e..0000000
--- a/src/interfaces/python/opengm/learning/pyLoss.cxx
+++ /dev/null
@@ -1,81 +0,0 @@
-#include <boost/python.hpp>
-#include <boost/python/suite/indexing/vector_indexing_suite.hpp>
-#include <stdexcept>
-#include <stddef.h>
-
-//#include <opengm/learning/loss/hammingloss.hxx>
-//#include <opengm/learning/loss/generalized-hammingloss.hxx>
-#include <opengm/learning/loss/flexibleloss.hxx>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-
-using namespace boost::python;
-
-namespace opengm{
-    
-void pySetNodeLossMultiplier(opengm::learning::FlexibleLoss::Parameter& p,
-                             const opengm::python::NumpyView<double,1>& m)
-{
-    p.nodeLossMultiplier_ = std::vector<double>(m.begin(), m.end());
-}
-
-void pySetLabelLossMultiplier(opengm::learning::FlexibleLoss::Parameter& p,
-                             const opengm::python::NumpyView<double,1>& m)
-{
-    p.labelLossMultiplier_ = std::vector<double>(m.begin(), m.end());
-}
-void pySetFactorLossMultiplier(opengm::learning::FlexibleLoss::Parameter& p,
-                               const opengm::python::NumpyView<double,1>& m)
-{
-    p.labelLossMultiplier_ = std::vector<double>(m.begin(), m.end());
-}
-
-
-template <class GM>
-void export_loss(){
-   // only FlexibleLoss is exported; the Hamming/NoLoss variants are gone
-   // together with their (now commented-out) includes above
-   typedef opengm::learning::FlexibleLoss PyFlexibleLoss;
-
-    class_<PyFlexibleLoss >("FlexibleLoss")
-        //.def("loss", &PyHammingLoss::loss<const GM &, Literator,Literator>)
-        //.def("addLoss", &PyHammingLoss::addLoss<GM, Literator>)
-    ;
-
-    // learner param enum
-    enum_<PyFlexibleLoss::Parameter::LossType>("LossType")
-      .value("hamming", PyFlexibleLoss::Parameter::Hamming)
-      .value("l1",  PyFlexibleLoss::Parameter::L1)
-      .value("l2",  PyFlexibleLoss::Parameter::L2)
-      .value("partition",  PyFlexibleLoss::Parameter::Partition)
-      .value("ConfMat",  PyFlexibleLoss::Parameter::ConfMat)
-    ;
-
-
-    class_<PyFlexibleLoss::Parameter>("FlexibleLossParameter")
-        .def_readwrite("lossType", &PyFlexibleLoss::Parameter::lossType_)
-        .def("setNodeLossMultiplier", &pySetNodeLossMultiplier)
-        .def("setLabelLossMultiplier", &pySetLabelLossMultiplier)
-        .def("setFactorLossMultiplier", &pySetFactorLossMultiplier)
-    ;
-
-
-    class_<std::vector< PyFlexibleLoss::Parameter > >("FlexibleLossParameterVector")
-        .def(vector_indexing_suite<std::vector< PyFlexibleLoss::Parameter> >())
-    ;
-
-
-}
-
-
-template void export_loss<opengm::python::GmAdder>();
-
-}
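The loss bindings above exported FlexibleLoss, its LossType enum, and a parameter class. A minimal usage sketch, assuming these names were re-exported unchanged by the removed opengm.learning package (`nVariables` and `nLabels` are illustrative):

    import numpy as np
    p = learning.FlexibleLossParameter()
    p.lossType = learning.LossType.l1            # hamming, l1, l2, partition, ConfMat
    p.setNodeLossMultiplier(np.ones(nVariables))
    p.setLabelLossMultiplier(np.ones(nLabels))
    # setFactorLossMultiplier existed too, but see the note on its
    # implementation above.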
diff --git a/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx b/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
deleted file mode 100644
index 82fc5d0..0000000
--- a/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
+++ /dev/null
@@ -1,81 +0,0 @@
-#if defined(WITH_CPLEX) || defined(WITH_GUROBI)
-
-#include <boost/python.hpp>
-#include <boost/python/module.hpp>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-#include <opengm/python/numpyview.hxx>
-
-#include <opengm/inference/icm.hxx>
-#include <opengm/learning/maximum_likelihood_learning.hxx>
-
-#define DefaultErrorFn DefaultErrorFn_TrwsExternal_ML
-#include "helper.hxx"
-
-namespace bp = boost::python;
-namespace op = opengm::python;
-namespace ol = opengm::learning;
-
-namespace opengm{
-
-
-    template<class PARAM>
-    PARAM * pyMaxLikelihoodParamConstructor(
-        size_t maximumNumberOfIterations=100,
-        double gradientStepSize=0.1,
-        double weightStoppingCriteria=1e-8,
-        double gradientStoppingCriteria=1e-8,
-        bool infoFlag=true,
-        bool infoEveryStep=false,
-        double weightRegularizer = 1.0,
-        size_t beliefPropagationMaximumNumberOfIterations = 20,
-        double beliefPropagationConvergenceBound = 0.0001,
-        double beliefPropagationDamping = 0.5,
-        double beliefPropagationTemperature = 0.3,
-        opengm::Tribool beliefPropagationIsAcyclic=opengm::Tribool(opengm::Tribool::Maybe)
-    ){
-        PARAM * p = new PARAM();
-        p->maximumNumberOfIterations_ = maximumNumberOfIterations;
-        p->gradientStepSize_ = gradientStepSize;
-        p->weightStoppingCriteria_ = weightStoppingCriteria;
-        p->gradientStoppingCriteria_ = gradientStoppingCriteria;
-        p->infoFlag_ = infoFlag;
-        p->infoEveryStep_ = infoEveryStep;
-        p->weightRegularizer_ = weightRegularizer;
-        p->beliefPropagationMaximumNumberOfIterations_ = beliefPropagationMaximumNumberOfIterations;
-        p->beliefPropagationConvergenceBound_ = beliefPropagationConvergenceBound;
-        p->beliefPropagationDamping_ = beliefPropagationDamping;
-        p->beliefPropagationTemperature_ = beliefPropagationTemperature;
-        p->beliefPropagationIsAcyclic_ = beliefPropagationIsAcyclic;
-        return p;
-    }
-
-    template<class DATASET>
-    void export_max_likelihood_learner(const std::string & clsName){
-        typedef learning::MaximumLikelihoodLearner<DATASET> PyLearner;
-        typedef typename PyLearner::Parameter PyLearnerParam;
-        typedef typename PyLearner::DatasetType DatasetType;
-
-        const std::string paramClsName = clsName + std::string("Parameter");
-
-        bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
-            .def("__init__", make_constructor(&pyMaxLikelihoodParamConstructor<PyLearnerParam>, boost::python::default_call_policies()))
-        ;
-
-        boost::python::class_<PyLearner>( clsName.c_str(), boost::python::init<DatasetType &, const PyLearnerParam &>() )
-            .def("learn",&PyLearner::learn)
-        ;
-    }
-
-  //template void
-  //export_max_likelihood_learner<op::GmAdderHammingLossDataset> (const std::string& className);
-
-    template void
-    export_max_likelihood_learner<op::GmAdderFlexibleLossDataset> (const std::string& className);
-}
-
-
-
-#endif
-
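The maximum-likelihood learner above (built only WITH_CPLEX or WITH_GUROBI) exposed a parameter with the twelve knobs of pyMaxLikelihoodParamConstructor and a learner with a single learn() method. A hypothetical sketch with placeholder class names:

    param = learning.MaxLikelihoodLearnerParameter()   # C++ defaults apply
    learner = learning.MaxLikelihoodLearner(dataset, param)
    learner.learn()                                    # the only method exported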
diff --git a/src/interfaces/python/opengm/learning/pyRws.cxx b/src/interfaces/python/opengm/learning/pyRws.cxx
deleted file mode 100644
index 43bdaf9..0000000
--- a/src/interfaces/python/opengm/learning/pyRws.cxx
+++ /dev/null
@@ -1,72 +0,0 @@
-#include <boost/python.hpp>
-#include <boost/python/module.hpp>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-#include <opengm/python/numpyview.hxx>
-#include <opengm/learning/rws.hxx>
-
-#define DefaultErrorFn DefaultErrorFn_TrwsExternalRws
-#include "helper.hxx"
-
-namespace bp = boost::python;
-namespace op = opengm::python;
-namespace ol = opengm::learning;
-
-namespace opengm{
-
-
-    template<class PARAM>
-    PARAM * pyRwsParamConstructor(
-    ){
-        PARAM * p  = new PARAM();
-        return p;
-    }
-
-    template<class L >
-    L * pyRwsConstructor(
-        typename L::DatasetType & dataset,
-        const typename L::Parameter & param
-    ){
-        L * l  = new L(dataset, param);
-        return l;
-    }
-
-    template<class DATASET>
-    void export_rws_learner(const std::string & clsName){
-        typedef learning::Rws<DATASET> PyLearner;
-        typedef typename PyLearner::Parameter PyLearnerParam;
-
-        const std::string paramClsName = clsName + std::string("Parameter");
-
-
-        // learner param
-        bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
-            .def("__init__", make_constructor(&pyRwsParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
-            .def_readwrite("eps",  &PyLearnerParam::eps_)
-            .def_readwrite("maxIterations", &PyLearnerParam::maxIterations_)
-            .def_readwrite("stopLoss", &PyLearnerParam::stopLoss_)
-            .def_readwrite("learningRate", &PyLearnerParam::learningRate_)
-            .def_readwrite("C", &PyLearnerParam::C_)
-            .def_readwrite("p", &PyLearnerParam::p_)
-            .def_readwrite("sigma", &PyLearnerParam::sigma_)
-        ;
-
-
-        // learner
-        bp::class_<PyLearner>( clsName.c_str(), bp::no_init )
-        .def("__init__", make_constructor(&pyRwsConstructor<PyLearner> ,boost::python::default_call_policies()))
-        .def(LearnerInferenceSuite<PyLearner>())
-        ;
-    }
-
-    template void 
-    export_rws_learner<op::GmAdderFlexibleLossDataset> (const std::string& className);
-}
-
-
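The Rws parameter above exposed eps, maxIterations, stopLoss, learningRate, C, p and sigma as plain attributes. A hypothetical sketch (placeholder class names, illustrative values; the field semantics are not documented in the binding):

    param = learning.RwsParameter()
    param.learningRate = 0.1
    param.C = 100.0
    param.sigma = 1.0
    learner = learning.Rws(dataset, param)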
diff --git a/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx b/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx
deleted file mode 100644
index e8d5ba7..0000000
--- a/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx
+++ /dev/null
@@ -1,64 +0,0 @@
-#if defined(WITH_CPLEX) || defined(WITH_GUROBI)
-
-#include <boost/python.hpp>
-#include <boost/python/module.hpp>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-#include <opengm/python/numpyview.hxx>
-
-#include <opengm/inference/icm.hxx>
-#include <opengm/learning/struct-max-margin.hxx>
-
-#define DefaultErrorFn DefaultErrorFn_TrwsExternal_SMM
-#include "helper.hxx"
-
-namespace bp = boost::python;
-namespace op = opengm::python;
-namespace ol = opengm::learning;
-
-namespace opengm{
-
-
-    template<class PARAM>
-    PARAM * pyStructMaxMarginBundleParamConstructor(
-        double regularizerWeight,
-        op::GmValueType minEps,
-        unsigned int steps,
-        bool eps_from_gap = true
-    ){
-        PARAM * p  = new PARAM();
-        p->optimizerParameter_.lambda  = regularizerWeight;
-        p->optimizerParameter_.min_eps = minEps;
-        p->optimizerParameter_.steps   = steps;
-        if(eps_from_gap)
-            p->optimizerParameter_.epsStrategy = ol::BundleOptimizer<op::GmValueType>::EpsFromGap;
-        else
-            p->optimizerParameter_.epsStrategy = ol::BundleOptimizer<op::GmValueType>::EpsFromChange;
-        return p;
-    }
-
-    template<class DATASET, class OPTIMIZER>
-    void export_struct_max_margin_bundle_learner(const std::string & clsName){
-        typedef learning::StructMaxMargin<DATASET, OPTIMIZER> PyLearner;
-        typedef typename PyLearner::Parameter PyLearnerParam;
-        typedef typename PyLearner::DatasetType DatasetType;
-
-        const std::string paramClsName = clsName + std::string("Parameter");
-
-        bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
-            .def("__init__", make_constructor(&pyStructMaxMarginBundleParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
-        ;
-
-        boost::python::class_<PyLearner>( clsName.c_str(), boost::python::init<DatasetType &, const PyLearnerParam &>() )
-            .def(LearnerInferenceSuite<PyLearner>())
-        ;
-    }
-
-    template void
-    export_struct_max_margin_bundle_learner<op::GmAdderFlexibleLossDataset, ol::BundleOptimizer<op::GmValueType> > (const std::string& className);
-
-}
-
-
-
-#endif
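The bundle learner's parameter constructor above took regularizerWeight, minEps, steps and eps_from_gap, in that order. A hypothetical sketch with placeholder names and illustrative values:

    param = learning.StructMaxMarginLearnerParameter(1.0, 1e-5, 100, True)
    learner = learning.StructMaxMarginLearner(dataset, param)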
diff --git a/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx b/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx
deleted file mode 100644
index 6e3633e..0000000
--- a/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx
+++ /dev/null
@@ -1,75 +0,0 @@
-#include <boost/python.hpp>
-#include <boost/python/module.hpp>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-#include <opengm/python/numpyview.hxx>
-#include <opengm/learning/structured_perceptron.hxx>
-
-#define DefaultErrorFn DefaultErrorFn_TrwsExternalSPerceptron
-#include "helper.hxx"
-
-namespace bp = boost::python;
-namespace op = opengm::python;
-namespace ol = opengm::learning;
-
-namespace opengm{
-
-
-    template<class PARAM>
-    PARAM * pyStructuredPerceptronParamConstructor(
-    ){
-        PARAM * p  = new PARAM();
-        return p;
-    }
-
-    template<class L >
-    L * pyStructuredPerceptronConstructor(
-        typename L::DatasetType & dataset,
-        const typename L::Parameter & param
-    ){
-        L * l  = new L(dataset, param);
-        return l;
-    }
-
-    template<class DATASET>
-    void export_struct_perceptron_learner(const std::string & clsName){
-        typedef learning::StructuredPerceptron<DATASET> PyLearner;
-        typedef typename PyLearner::Parameter PyLearnerParam;
-
-        const std::string paramClsName = clsName + std::string("Parameter");
-
-        const std::string paramEnumLearningModeName = clsName + std::string("Parameter_LearningMode");
-
-        // learner param enum
-        bp::enum_<typename PyLearnerParam::LearningMode>(paramEnumLearningModeName.c_str())
-            .value("online", PyLearnerParam::Online)
-            .value("batch", PyLearnerParam::Batch)
-        ;
-
-        // learner param
-        bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
-            .def("__init__", make_constructor(&pyStructuredPerceptronParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
-            .def_readwrite("eps",  &PyLearnerParam::eps_)
-            .def_readwrite("maxIterations", &PyLearnerParam::maxIterations_)
-            .def_readwrite("stopLoss", &PyLearnerParam::stopLoss_)
-            .def_readwrite("decayExponent", &PyLearnerParam::decayExponent_)
-            .def_readwrite("decayT0", &PyLearnerParam::decayT0_)
-            .def_readwrite("learningMode", &PyLearnerParam::learningMode_)
-        ;
-
-
-        // learner
-        bp::class_<PyLearner>( clsName.c_str(), bp::no_init )
-        .def("__init__", make_constructor(&pyStructuredPerceptronConstructor<PyLearner> ,boost::python::default_call_policies()))
-        .def(LearnerInferenceSuite<PyLearner>())
-        ;
-    }
-
-    template void 
-    export_struct_perceptron_learner<op::GmAdderFlexibleLossDataset> (const std::string& className);
-
-    // template void 
-    // export_struct_perceptron_learner<op::GmAdderGeneralizedHammingLossDataset> (const std::string& className);
-}
-
-
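The structured-perceptron binding above also exported a LearningMode enum (online/batch) under the name <clsName>Parameter_LearningMode. A hypothetical sketch, with "StructPerceptron" standing in for the real clsName chosen by the deleted learning.cxx:

    param = learning.StructPerceptronParameter()
    param.learningMode = learning.StructPerceptronParameter_LearningMode.online
    param.eps = 1e-6
    param.maxIterations = 1000
    learner = learning.StructPerceptron(dataset, param)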
diff --git a/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx b/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
deleted file mode 100644
index 00d5a26..0000000
--- a/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
+++ /dev/null
@@ -1,80 +0,0 @@
-#include <boost/python.hpp>
-#include <boost/python/module.hpp>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-#include <opengm/python/numpyview.hxx>
-#include <opengm/learning/subgradient_ssvm.hxx>
-
-#define DefaultErrorFn DefaultErrorFn_TrwsExternalSubgradientSSVM
-#include "helper.hxx"
-
-namespace bp = boost::python;
-namespace op = opengm::python;
-namespace ol = opengm::learning;
-
-namespace opengm{
-
-
-    template<class PARAM>
-    PARAM * pySubgradientSSVMParamConstructor(
-    ){
-        PARAM * p  = new PARAM();
-        return p;
-    }
-
-    template<class L >
-    L * pySubgradientSSVMConstructor(
-        typename L::DatasetType & dataset,
-        const typename L::Parameter & param
-    ){
-        L * l  = new L(dataset, param);
-        return l;
-    }
-
-    template<class DATASET>
-    void export_subgradient_ssvm_learner(const std::string & clsName){
-        typedef learning::SubgradientSSVM<DATASET> PyLearner;
-        typedef typename PyLearner::Parameter PyLearnerParam;
-
-        const std::string paramClsName = clsName + std::string("Parameter");
-
-        const std::string paramEnumLearningModeName = clsName + std::string("Parameter_LearningMode");
-
-        // learner param enum
-        bp::enum_<typename PyLearnerParam::LearningMode>(paramEnumLearningModeName.c_str())
-            .value("online", PyLearnerParam::Online)
-            .value("batch", PyLearnerParam::Batch)
-        ;
-
-        // learner param
-        bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
-            .def("__init__", make_constructor(&pyStructuredPerceptronParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
-            .def_readwrite("eps",  &PyLearnerParam::eps_)
-            .def_readwrite("maxIterations", &PyLearnerParam::maxIterations_)
-            .def_readwrite("stopLoss", &PyLearnerParam::stopLoss_)
-            .def_readwrite("learningRate", &PyLearnerParam::learningRate_)
-            .def_readwrite("C", &PyLearnerParam::C_)
-            .def_readwrite("learningMode", &PyLearnerParam::learningMode_)
-            .def_readwrite("averaging", &PyLearnerParam::averaging_)
-            .def_readwrite("nConf", &PyLearnerParam::nConf_)
-        ;
-
-
-        // learner
-        bp::class_<PyLearner>( clsName.c_str(), bp::no_init )
-        .def("__init__", make_constructor(&pyStructuredPerceptronConstructor<PyLearner> ,boost::python::default_call_policies()))
-        .def(LearnerInferenceSuite<PyLearner>())
-        ;
-    }
-
-    // template void 
-    // export_subgradient_ssvm_learner<op::GmAdderHammingLossDataset> (const std::string& className);
-
-    // template void 
-    // export_subgradient_ssvm_learner<op::GmAdderGeneralizedHammingLossDataset> (const std::string& className);
-
-    template void 
-    export_subgradient_ssvm_learner<op::GmAdderFlexibleLossDataset> (const std::string& className);
-}
-
-
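Same pattern for the subgradient SSVM above, whose parameter additionally exposed learningRate, C, averaging and nConf. Hypothetical sketch, placeholder names and illustrative values:

    param = learning.SubgradientSSVMParameter()
    param.learningMode = learning.SubgradientSSVMParameter_LearningMode.batch
    param.C = 100.0
    param.learningRate = 1.0
    learner = learning.SubgradientSSVM(dataset, param)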
diff --git a/src/interfaces/python/opengm/learning/pyWeights.cxx b/src/interfaces/python/opengm/learning/pyWeights.cxx
deleted file mode 100644
index 10afc6e..0000000
--- a/src/interfaces/python/opengm/learning/pyWeights.cxx
+++ /dev/null
@@ -1,46 +0,0 @@
-#include <boost/python.hpp>
-#include <boost/python/module.hpp>
-#include <opengm/python/opengmpython.hxx>
-#include <opengm/python/converter.hxx>
-#include <opengm/python/numpyview.hxx>
-
-
-
-namespace opengm{
-
-    template<class V>
-    learning::Weights<V>  * pyWeightsConstructor(
-        python::NumpyView<V, 1> values                                           
-    ){
-        learning::Weights<V>   * f = new learning::Weights<V> (values.shape(0));
-        for(size_t i=0; i<values.shape(0); ++i){
-            f->setWeight(i, values(i));
-        }
-        return f;
-    }
-
-
-
-    void export_weights(){
-        typedef  python::GmValueType V;
-        typedef learning::Weights<V> Weights;
-        boost::python::class_<Weights>("Weights",boost::python::init<const size_t >())
-            .def("__init__", make_constructor(&pyWeightsConstructor<V> ,boost::python::default_call_policies()))
-            .def("__getitem__", &Weights::getWeight)
-            .def("__setitem__", &Weights::setWeight)
-            .def("__len__", &Weights::numberOfWeights)
-        ;
-    }
-
-    void export_weight_constraints(){
-        typedef  python::GmValueType V;
-        typedef learning::WeightConstraints<V> Weights;
-        boost::python::class_<Weights>("WeightConstraints",boost::python::init<const size_t >())
-            //.def("__init__", make_constructor(&pyWeightsConstructor<V> ,boost::python::default_call_policies()))
-            //.def("__getitem__", &Weights::getWeight)
-            //.def("__setitem__", &Weights::setWeight)
-        ;
-    }
-
-
-}
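The Weights binding above is the one piece whose Python surface is fully visible in this diff: construction from a size or a 1-D numpy array, plus item access. Sketch (module name assumed):

    import numpy as np
    w = learning.Weights(np.array([0.0, 0.5, 1.0]))
    w[1] = 2.0               # __setitem__ -> setWeight
    print(len(w), w[1])      # -> 3 2.0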

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git


