[opengm] 196/386: implemented structured perceptron
Ghislain Vaillant
ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:37:39 UTC 2016
This is an automated email from the git hooks/post-receive script.
ghisvail-guest pushed a commit to branch debian/master
in repository opengm.
commit 27f2a5b972cba5f4a3a63441c3f044f174e1c5e7
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date: Thu Dec 25 21:00:39 2014 +0100
implemented structured perceptron
---
fubar/real_example_2.py | 20 +-
.../graphicalmodel/graphicalmodel_factor.hxx | 28 +++
include/opengm/inference/external/ad3.hxx | 23 +++
include/opengm/inference/external/daoopt.hxx | 26 +++
include/opengm/inference/external/fastPD.hxx | 16 ++
include/opengm/inference/external/mrflib.hxx | 15 ++
include/opengm/inference/external/qpbo.hxx | 4 +-
include/opengm/inference/external/trws.hxx | 25 +++
include/opengm/learning/structured_perceptron.hxx | 227 +++++++++++++++++++++
include/opengm/utilities/shape_accessor.hxx | 36 ++++
.../python/opengm/learning/CMakeLists.txt | 4 +-
src/interfaces/python/opengm/learning/__init__.py | 17 ++
src/interfaces/python/opengm/learning/learning.cxx | 13 +-
.../python/opengm/learning/pyStructPerceptron.cxx | 59 ++++++
14 files changed, 499 insertions(+), 14 deletions(-)
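The substance of this commit is a new structured perceptron learner for OpenGM's learning module, plus the plumbing it needs: global-to-factor label iterators on Factor, RebindGm machinery on the external inference wrappers, and Python bindings. The learner's core is the classic structured perceptron step: run MAP inference with the current weights, accumulate features under the MAP labeling and under the ground truth, and move each weight along the difference with a decaying step size. A minimal standalone C++ sketch of that update, mirroring the loop in structured_perceptron.hxx (names are illustrative, not the OpenGM API):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // One perceptron step: w[i] += eta_t * (phiMap[i] - phiGt[i]) with
    // eta_t = 1 / (100 * sqrt(1 + t)). Returns the squared weight change,
    // which the learner uses as its stopping criterion (stop below 1e-6).
    double perceptronStep(std::vector<double>& w,
                          const std::vector<double>& phiMap,
                          const std::vector<double>& phiGt,
                          std::size_t t) {
        const double eta = 1.0 / (100.0 * std::sqrt(1.0 + t));
        double change = 0.0;
        for (std::size_t i = 0; i < w.size(); ++i) {
            const double delta = eta * (phiMap[i] - phiGt[i]);
            w[i] += delta;
            change += delta * delta;
        }
        return change;
    }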
diff --git a/fubar/real_example_2.py b/fubar/real_example_2.py
index d565002..8eff5d7 100644
--- a/fubar/real_example_2.py
+++ b/fubar/real_example_2.py
@@ -5,13 +5,13 @@ import vigra
import pylab as plt
import pylab
-nModels = 1
+nModels = 100
nLables = 2
-shape = [10, 10]
+shape = [30, 30]
numVar = shape[0]*shape[1]
-sSmooth = [1.0, 1.5]
-sGrad = [1.0, 1.5]
+sSmooth = [1.0, 1.5, 2.0, 3.0]
+sGrad = [1.0, 1.5, 2.0, 3.0]
nUWeights = len(sSmooth) + 1
nBWeights = len(sGrad) + 1
@@ -35,12 +35,12 @@ dataset = learning.createDataset(numWeights=nWeights, loss='h')
weights = dataset.getWeights()
def makeFeatures(gt):
- random = (numpy.random.rand(*gt.shape)-0.5)*5.0
+ random = (numpy.random.rand(*gt.shape)-0.5)*3.0
randGt = random + gt
- #vigra.imshow(randGt)
- #plt.colorbar()
- #vigra.show()
+ # vigra.imshow(randGt)
+ # plt.colorbar()
+ # vigra.show()
# f = pylab.figure()
# for n, a in enumerate([gt, randGt]):
@@ -129,9 +129,9 @@ upperBounds = numpy.ones(nWeights)*2.0
nTestPoints =numpy.ones(nWeights).astype('uint64')*5
# learner = learning.gridSearchLearner(dataset=dataset,lowerBounds=lowerBounds, upperBounds=upperBounds,nTestPoints=nTestPoints)
-learner = learning.structMaxMarginLearner(dataset, 1.0, 0.001, 0)
+#learner = learning.structMaxMarginLearner(dataset, 1.0, 0.001, 0)
#learner = learning.maxLikelihoodLearner(dataset)
-
+learner = learning.structPerceptron(dataset)
learner.learn(infCls=opengm.inference.QpboExternal,
parameter=opengm.InfParam())
diff --git a/include/opengm/graphicalmodel/graphicalmodel_factor.hxx b/include/opengm/graphicalmodel/graphicalmodel_factor.hxx
index 87b782e..0985830 100755
--- a/include/opengm/graphicalmodel/graphicalmodel_factor.hxx
+++ b/include/opengm/graphicalmodel/graphicalmodel_factor.hxx
@@ -191,6 +191,34 @@ public:
ValueType min() const;
ValueType max() const;
IndexType dimension()const{return this->numberOfVariables();}
+
+
+
+ template<class LABEL_ITER>
+ struct GmToLabelIter{
+ typedef GmLabelFactorLabelAccessor<Factor<GRAPHICAL_MODEL>, LABEL_ITER> Accessor;
+ typedef AccessorIterator<Accessor, true> Iter;
+ };
+
+ template<class LABEL_ITER>
+ typename GmToLabelIter<LABEL_ITER>::Iter
+ gmToFactorLabelsBegin(LABEL_ITER gmLabelsBegin)const{
+ typedef typename GmToLabelIter<LABEL_ITER>::Accessor Accessor;
+ typedef typename GmToLabelIter<LABEL_ITER>::Iter Iter;
+ Accessor accessor(*this, gmLabelsBegin);
+ return Iter(accessor, 0);
+ }
+
+ template<class LABEL_ITER>
+ typename GmToLabelIter<LABEL_ITER>::Iter
+ gmToFactorLabelsEnd(LABEL_ITER gmLabelsBegin)const{
+ typedef typename GmToLabelIter<LABEL_ITER>::Accessor Accessor;
+ typedef typename GmToLabelIter<LABEL_ITER>::Iter Iter;
+ Accessor accessor(*this, gmLabelsBegin);
+ return Iter(accessor, this->numberOfVariables());
+ }
+
+
private:
void testInvariant() const;
//std::vector<IndexType> & variableIndexSequence();
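The new gmToFactorLabelsBegin/End methods let per-factor code evaluate a labeling of the whole model: they wrap a global label iterator in an accessor iterator that yields, for the factor's j-th variable, the label of variableIndex(j). Written eagerly as a copy (the patch avoids the copy via the accessor added to shape_accessor.hxx further down), the mapping is simply:

    #include <cstddef>
    #include <vector>

    // Stand-in sketch: variableIndices[j] plays the role of
    // factor.variableIndex(j); this is not the OpenGM interface itself.
    std::vector<std::size_t> factorLabels(
            const std::vector<std::size_t>& gmLabels,
            const std::vector<std::size_t>& variableIndices) {
        std::vector<std::size_t> out;
        out.reserve(variableIndices.size());
        for (std::size_t j = 0; j < variableIndices.size(); ++j)
            out.push_back(gmLabels[variableIndices[j]]);
        return out;
    }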
diff --git a/include/opengm/inference/external/ad3.hxx b/include/opengm/inference/external/ad3.hxx
index 42c9d2d..8ccaaba 100644
--- a/include/opengm/inference/external/ad3.hxx
+++ b/include/opengm/inference/external/ad3.hxx
@@ -30,6 +30,16 @@ namespace opengm {
typedef visitors::EmptyVisitor<AD3Inf<GM,ACC> > EmptyVisitorType;
typedef visitors::TimingVisitor<AD3Inf<GM,ACC> > TimingVisitorType;
+ template<class _GM>
+ struct RebindGm{
+ typedef AD3Inf<_GM,ACC> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef AD3Inf<_GM,_ACC> type;
+ };
+
enum SolverType{
AD3_LP,
AD3_ILP,
@@ -54,6 +64,19 @@ namespace opengm {
{
}
+ template<class P>
+ Parameter(
+ const P & p
+ ) :
+ solverType_(p.solverType_),
+ eta_(p.eta_),
+ adaptEta_(p.adaptEta_),
+ steps_(p.steps_),
+ residualThreshold_(p.residualThreshold_),
+ verbosity_(p.verbosity_)
+ {
+ }
+
SolverType solverType_;
double eta_;
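The RebindGm/RebindGmAndAcc structs added here, and in the hunks below for DAOOPT, FastPD, MRFLIB and TRWS, are a standard C++ metafunction idiom: given an inference type instantiated for one graphical model, generic code such as the new learner can name the same solver instantiated for another model or accumulator. (The QPBO hunk only changes class to struct, fixing nested types that were default-private and therefore inaccessible.) A self-contained sketch of the pattern with a made-up solver type:

    // Not an OpenGM type; SomeSolver stands in for AD3Inf, TRWS, etc.
    template<class GM, class ACC>
    struct SomeSolver {
        template<class OTHER_GM>
        struct RebindGm {
            typedef SomeSolver<OTHER_GM, ACC> type;
        };
        template<class OTHER_GM, class OTHER_ACC>
        struct RebindGmAndAcc {
            typedef SomeSolver<OTHER_GM, OTHER_ACC> type;
        };
    };

    // Generic code can then write:
    //   typedef typename INF::template RebindGm<MyGm>::type MyInf;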
diff --git a/include/opengm/inference/external/daoopt.hxx b/include/opengm/inference/external/daoopt.hxx
index 477553c..9d584df 100644
--- a/include/opengm/inference/external/daoopt.hxx
+++ b/include/opengm/inference/external/daoopt.hxx
@@ -51,6 +51,17 @@ namespace opengm {
typedef visitors::EmptyVisitor<DAOOPT<GM> > EmptyVisitorType;
typedef visitors::TimingVisitor<DAOOPT<GM> > TimingVisitorType;
+
+ template<class _GM>
+ struct RebindGm{
+ typedef DAOOPT<_GM,ACC> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef DAOOPT<_GM,_ACC> type;
+ };
+
///Parameter inherits from daoopt ProgramOptions
struct Parameter : public daoopt::ProgramOptions {
/// \brief Constructor
@@ -68,6 +79,21 @@ namespace opengm {
sampleRepeat = 1;
aobbLookahead = 5;
}
+ template<class P>
+ Parameter(const P & p) : daoopt::ProgramOptions() {
+ // copy options explicitly; daoopt does not do this for all parameters
+ subprobOrder = p.subprobOrder;
+ ibound = p.ibound;
+ cbound = p.cbound;
+ cbound_worker = p.cbound_worker;
+ rotateLimit = p.rotateLimit;
+ order_iterations = p.order_iterations;
+ order_timelimit = p.order_timelimit;
+ threads = p.threads;
+ sampleDepth = p.sampleDepth;
+ sampleRepeat = p.sampleRepeat;
+ aobbLookahead = p.aobbLookahead;
+ }
};
// construction
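Hand in hand with the rebind structs, each wrapper's Parameter gains a templated converting constructor, so a parameter object built for one instantiation can initialize the parameter of a rebound one, field by field. The shape of the idiom, with illustrative field names:

    // Sketch only; the real Parameter structs copy their own fields.
    struct Param {
        int iterations_;
        double tolerance_;
        Param() : iterations_(1000), tolerance_(0.0) {}
        // Accept any parameter-like type exposing the same fields.
        template<class P>
        Param(const P& p)
            : iterations_(p.iterations_), tolerance_(p.tolerance_) {}
    };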
diff --git a/include/opengm/inference/external/fastPD.hxx b/include/opengm/inference/external/fastPD.hxx
index 0c8abe2..f200f30 100644
--- a/include/opengm/inference/external/fastPD.hxx
+++ b/include/opengm/inference/external/fastPD.hxx
@@ -35,6 +35,16 @@ namespace opengm {
typedef visitors::EmptyVisitor<FastPD<GM> > EmptyVisitorType;
typedef visitors::TimingVisitor<FastPD<GM> > TimingVisitorType;
+ template<class _GM>
+ struct RebindGm{
+ typedef FastPD<_GM,ACC> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef FastPD<_GM,_ACC> type;
+ };
+
///Parameter
struct Parameter {
/// \brief Constructor
@@ -42,6 +52,12 @@ namespace opengm {
}
/// number of iterations
size_t numberOfIterations_;
+
+ template<class P>
+ Parameter(const P & p)
+ : numberOfIterations_(p.numberOfIterations_){
+
+ }
};
// construction
FastPD(const GraphicalModelType& gm, const Parameter& para = Parameter());
diff --git a/include/opengm/inference/external/mrflib.hxx b/include/opengm/inference/external/mrflib.hxx
index 857a20b..294ee23 100644
--- a/include/opengm/inference/external/mrflib.hxx
+++ b/include/opengm/inference/external/mrflib.hxx
@@ -43,6 +43,17 @@ namespace opengm {
typedef visitors::TimingVisitor<MRFLIB<GM> > TimingVisitorType;
typedef size_t VariableIndex;
///Parameter
+
+ template<class _GM>
+ struct RebindGm{
+ typedef MRFLIB<_GM> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef MRFLIB<_GM> type;
+ };
+
struct Parameter {
/// possible optimization algorithms for MRFLIB
enum InferenceType {ICM, EXPANSION, SWAP, MAXPRODBP, TRWS, BPS};
@@ -60,6 +71,10 @@ namespace opengm {
Parameter(const InferenceType inferenceType = ICM, const EnergyType energyType = VIEW, const size_t numberOfIterations = 1000)
: inferenceType_(inferenceType), energyType_(energyType), numberOfIterations_(numberOfIterations), trwsTolerance_(0.0) {
}
+ template<class P>
+ Parameter(const P & p)
+ : inferenceType_(p.inferenceType_), energyType_(p.energyType_), numberOfIterations_(p.numberOfIterations_), trwsTolerance_(p.trwsTolerance_) {
+ }
};
// construction
MRFLIB(const GraphicalModelType& gm, const Parameter& para = Parameter());
diff --git a/include/opengm/inference/external/qpbo.hxx b/include/opengm/inference/external/qpbo.hxx
index 91bb777..ff69937 100644
--- a/include/opengm/inference/external/qpbo.hxx
+++ b/include/opengm/inference/external/qpbo.hxx
@@ -36,12 +36,12 @@ namespace opengm {
};
template<class _GM>
- class RebindGm{
+ struct RebindGm{
typedef QPBO<_GM> type;
};
template<class _GM,class _ACC>
- class RebindGmAndAcc{
+ struct RebindGmAndAcc{
typedef QPBO<_GM> type;
};
diff --git a/include/opengm/inference/external/trws.hxx b/include/opengm/inference/external/trws.hxx
index 0cd09a6..cb894f6 100644
--- a/include/opengm/inference/external/trws.hxx
+++ b/include/opengm/inference/external/trws.hxx
@@ -45,6 +45,18 @@ namespace opengm {
typedef visitors::EmptyVisitor<TRWS<GM> > EmptyVisitorType;
typedef visitors::TimingVisitor<TRWS<GM> > TimingVisitorType;
typedef size_t VariableIndex;
+
+
+ template<class _GM>
+ struct RebindGm{
+ typedef TRWS<_GM> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef TRWS<_GM> type;
+ };
+
///Parameter
struct Parameter {
/// possible energy types for TRWS
@@ -64,6 +76,19 @@ namespace opengm {
/// TRWS terminates if fabs(bound(t)-bound(t+1)) < minDualChange_
double minDualChange_;
/// \brief Constructor
+ template<class P>
+ Parameter(const P & p)
+ : numberOfIterations_(p.numberOfIterations_),
+ useRandomStart_(p.useRandomStart_),
+ useZeroStart_(p.useZeroStart_),
+ doBPS_(p.doBPS_),
+ energyType_(p.energyType_),
+ tolerance_(p.tolerance_),
+ minDualChange_(p.minDualChange_)
+ {
+
+ }
+
Parameter() {
numberOfIterations_ = 1000;
useRandomStart_ = false;
diff --git a/include/opengm/learning/structured_perceptron.hxx b/include/opengm/learning/structured_perceptron.hxx
new file mode 100644
index 0000000..7ab3fc5
--- /dev/null
+++ b/include/opengm/learning/structured_perceptron.hxx
@@ -0,0 +1,227 @@
+#pragma once
+#ifndef OPENGM_STRUCTURED_PERCEPTRON_LEARNER_HXX
+#define OPENGM_STRUCTURED_PERCEPTRON_LEARNER_HXX
+
+#include <vector>
+
+namespace opengm {
+ namespace learning {
+
+
+ // map a global labeling
+ // to a factor labeling iterator
+
+
+
+ template<class GM, class LABEL_ITER>
+ struct FeatureAccumulator{
+
+ typedef typename GM::LabelType LabelType;
+ typedef typename GM::IndexType IndexType;
+ typedef typename GM::ValueType ValueType;
+
+
+ FeatureAccumulator(const size_t nW)
+ : accFeaturesGt_(nW),
+ accFeaturesMap_(nW),
+ gtLabel_(),
+ mapLabel_(),
+ factor_(NULL){
+ }
+
+ void setLabels(const LABEL_ITER gtLabel, const LABEL_ITER mapLabel){
+ gtLabel_ = gtLabel;
+ mapLabel_ = mapLabel;
+ }
+
+ void resetWeights(){
+ for(size_t i=0; i<accFeaturesGt_.size(); ++i){
+ accFeaturesGt_[i] = 0.0;
+ accFeaturesMap_[i] = 0.0;
+ }
+ }
+ double fDiff(const size_t wi)const{
+ return accFeaturesMap_[wi] - accFeaturesGt_[wi];
+ }
+ void setFactor(const typename GM::FactorType & factor){
+ factor_ = &factor;
+ }
+ template<class F>
+ void operator()(const F & f){
+
+ // get the number of weights
+ const size_t nWeights = f.numberOfWeights();
+ if(nWeights>0){
+ // loop over all weights
+ for(size_t wi=0; wi<nWeights; ++wi){
+ // accumulate features for both labelings
+ const size_t gwi = f.weightIndex(wi);
+
+ // for gt label
+ accFeaturesGt_[gwi] += f.weightGradient(wi, factor_->gmToFactorLabelsBegin(gtLabel_));
+
+ // for test label
+ accFeaturesMap_[gwi] += f.weightGradient(wi, factor_->gmToFactorLabelsBegin(mapLabel_));
+ }
+ }
+ }
+
+
+ std::vector<double> accFeaturesGt_;
+ std::vector<double> accFeaturesMap_;
+ LABEL_ITER gtLabel_;
+ LABEL_ITER mapLabel_;
+ const typename GM::FactorType * factor_;
+ };
+
+
+
+
+ template<class DATASET>
+ class StructuredPerceptron
+ {
+ public:
+ typedef DATASET DatasetType;
+ typedef typename DATASET::GMType GMType;
+ typedef typename DATASET::LossType LossType;
+ typedef typename GMType::ValueType ValueType;
+ typedef typename GMType::IndexType IndexType;
+ typedef typename GMType::LabelType LabelType;
+
+ class Parameter{
+ public:
+ Parameter(){}
+ };
+
+
+ StructuredPerceptron(DATASET&, const Parameter& );
+
+ template<class INF>
+ void learn(const typename INF::Parameter& para);
+ //template<class INF, class VISITOR>
+ //void learn(typename INF::Parameter para, VISITOR vis);
+
+ const opengm::learning::Weights<double>& getWeights(){return weights_;}
+ Parameter& getLerningParameters(){return para_;}
+
+ private:
+
+ template<class INF, class FEATURE_ACCUMULATOR>
+ double accumulateFeatures(const typename INF::Parameter& para, FEATURE_ACCUMULATOR & featureAcc);
+
+ DATASET& dataset_;
+ opengm::learning::Weights<double> weights_;
+ Parameter para_;
+ };
+
+ template<class DATASET>
+ StructuredPerceptron<DATASET>::StructuredPerceptron(DATASET& ds, const Parameter& p )
+ : dataset_(ds), para_(p)
+ {
+ weights_ = opengm::learning::Weights<double>(ds.getNumberOfWeights());
+
+ }
+
+
+ template<class DATASET>
+ template<class INF>
+ void StructuredPerceptron<DATASET>::learn(const typename INF::Parameter& para){
+
+
+ typedef typename std::vector<LabelType>::const_iterator LabelIterator;
+ typedef FeatureAccumulator<GMType, LabelIterator> FeatureAcc;
+
+
+ const size_t nModels = dataset_.getNumberOfModels();
+ const size_t nWeights = dataset_.getNumberOfWeights();
+
+ FeatureAcc featureAcc(nWeights);
+
+ bool doLearning = true;
+
+ size_t iteration = 0 ;
+ while(doLearning){
+
+ // accumulate features
+ double currentLoss = this-> template accumulateFeatures<INF, FeatureAcc>(para, featureAcc);
+ std::cout<<++iteration<<" loss "<<currentLoss<<"\n";
+
+ //if(currentLoss==0){
+ // doLearning = false;
+ // break;
+ //}
+
+ double wChange = 0.0;
+ // update weights
+ for(size_t wi=0; wi<nWeights; ++wi){
+ const double learningRate = 1.0 /( 100.0*std::sqrt(1.0 + iteration));
+ const double wOld = dataset_.getWeights().getWeight(wi);
+ const double wNew = wOld + learningRate*featureAcc.fDiff(wi);
+ wChange += std::pow(wOld-wNew,2);
+ dataset_.getWeights().setWeight(wi, wNew);
+ }
+ std::cout<<" wChange"<<wChange<<"\n";
+
+ if(wChange <= 0.000001 ){
+ break;
+ }
+ }
+ }
+
+ template<class DATASET>
+ template<class INF, class FEATURE_ACCUMULATOR>
+ double StructuredPerceptron<DATASET>::accumulateFeatures(
+ const typename INF::Parameter& para,
+ FEATURE_ACCUMULATOR & featureAcc
+ ){
+
+
+ typedef typename std::vector<LabelType>::const_iterator LabelIterator;
+ typedef FeatureAccumulator<GMType, LabelIterator> FeatureAcc;
+ const size_t nModels = dataset_.getNumberOfModels();
+
+ double totalLoss=0.0;
+
+ // reset the accumulated features
+ featureAcc.resetWeights();
+
+ // iterate over all models
+ for(size_t gmi=0; gmi<nModels; ++gmi){
+
+ // lock the model
+ dataset_.lockModel(gmi);
+
+ // get model
+ const GMType & gm = dataset_.getModel(gmi);
+
+ // do inference
+ INF inf(gm, para);
+ std::vector<LabelType> arg;
+ inf.infer();
+ inf.arg(arg);
+
+ LossType lossFunction(dataset_.getLossParameters(gmi));
+
+ totalLoss +=lossFunction.loss(gm, arg.begin(), arg.end(),
+ dataset_.getGT(gmi).begin(), dataset_.getGT(gmi).end());
+
+ // pass arg and gt to featureAccumulator
+ featureAcc.setLabels(dataset_.getGT(gmi).begin(), arg.begin());
+
+
+ // iterate over all factors
+ // and accumulate features
+ for(size_t fi=0; fi<gm.numberOfFactors(); ++fi){
+ featureAcc.setFactor(gm[fi]);
+ gm[fi].callFunctor(featureAcc);
+ }
+ // unlock model
+ dataset_.unlockModel(gmi);
+ }
+
+ return totalLoss;
+ }
+
+}
+}
+#endif
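Two details of the new header are worth noting. FeatureAccumulator is applied to every factor via gm[fi].callFunctor(featureAcc), OpenGM's dispatch to the concrete function type; for each weight the function depends on, it accumulates the weight gradient under the ground-truth and the MAP labelings into two global vectors, whose difference drives the update. The accumulation, reduced to the interface the patch relies on (numberOfWeights, weightIndex and weightGradient are assumed to exist on learnable functions, as in the code above):

    #include <cstddef>
    #include <vector>

    struct Accumulator {
        std::vector<double> featGt, featMap;
        explicit Accumulator(std::size_t nWeights)
            : featGt(nWeights, 0.0), featMap(nWeights, 0.0) {}

        // F: a learnable function; GT_ITER/MAP_ITER: factor-local labelings.
        template<class F, class GT_ITER, class MAP_ITER>
        void accumulate(const F& f, GT_ITER gt, MAP_ITER map) {
            for (std::size_t i = 0; i < f.numberOfWeights(); ++i) {
                const std::size_t w = f.weightIndex(i);  // local -> global
                featGt[w]  += f.weightGradient(i, gt);
                featMap[w] += f.weightGradient(i, map);
            }
        }
    };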
diff --git a/include/opengm/utilities/shape_accessor.hxx b/include/opengm/utilities/shape_accessor.hxx
index 5541832..ac5a355 100644
--- a/include/opengm/utilities/shape_accessor.hxx
+++ b/include/opengm/utilities/shape_accessor.hxx
@@ -67,6 +67,42 @@ namespace opengm {
factor_pointer factor_;
};
+
+ template<class FACTOR, class GM_LABEL_ITER>
+ class GmLabelFactorLabelAccessor {
+ public:
+ typedef typename std::iterator_traits<GM_LABEL_ITER>::value_type value_type;
+
+ typedef const value_type reference;
+ typedef const value_type* pointer;
+ typedef const FACTOR& factor_reference;
+ typedef const FACTOR* factor_pointer;
+
+ GmLabelFactorLabelAccessor()
+ : factor_(NULL),
+ gmLabelIter_()
+ {}
+ GmLabelFactorLabelAccessor(factor_reference f , GM_LABEL_ITER iter)
+ : factor_(&f),
+ gmLabelIter_(iter)
+ {}
+ size_t size() const
+ { return factor_ == 0 ? 0 : factor_->numberOfVariables(); }
+ reference operator[](const size_t j)
+ { return gmLabelIter_[factor_->variableIndex(j)]; }
+ const value_type operator[](const size_t j) const
+ { return gmLabelIter_[factor_->variableIndex(j)]; }
+ bool operator==(const GmLabelFactorLabelAccessor<FACTOR, GM_LABEL_ITER> & other) const
+ { return factor_ == other.factor_ &&
+ gmLabelIter_ == other.gmLabelIter_; }
+
+ private:
+ factor_pointer factor_;
+ GM_LABEL_ITER gmLabelIter_;
+ };
+
+
+
template<class FACTOR>
class FactorVariablesAccessor {
public:
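GmLabelFactorLabelAccessor follows the accessor convention used throughout shape_accessor.hxx: a lightweight object exposing size() and operator[], which AccessorIterator then turns into an STL-style iterator, so no per-factor label vector is ever materialized. A minimal hand-rolled version of that wrapping (not the actual OpenGM AccessorIterator):

    #include <cstddef>

    template<class ACCESSOR>
    class AccessorIter {
        const ACCESSOR* acc_;
        std::size_t pos_;
    public:
        AccessorIter(const ACCESSOR& a, std::size_t pos)
            : acc_(&a), pos_(pos) {}
        typename ACCESSOR::value_type operator*() const { return (*acc_)[pos_]; }
        AccessorIter& operator++() { ++pos_; return *this; }
        bool operator==(const AccessorIter& o) const { return pos_ == o.pos_; }
        bool operator!=(const AccessorIter& o) const { return pos_ != o.pos_; }
    };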
diff --git a/src/interfaces/python/opengm/learning/CMakeLists.txt b/src/interfaces/python/opengm/learning/CMakeLists.txt
index af13bd9..8f0f489 100644
--- a/src/interfaces/python/opengm/learning/CMakeLists.txt
+++ b/src/interfaces/python/opengm/learning/CMakeLists.txt
@@ -25,7 +25,9 @@ set(PY_OPENGM_CORE_SOURCES
pyLoss.cxx
pyGridSearchLearner.cxx
pyMaxLikelihoodLearner.cxx
- pyStructMaxMarginLearner.cxx)
+ pyStructMaxMarginLearner.cxx
+ pyStructPerceptron.cxx
+ )
if(APPLE)
add_library(_learning MODULE ${PY_OPENGM_CORE_SOURCES})
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index 40994db..91c087e 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -37,6 +37,10 @@ GridSearch_GeneralizedHammingLoss.learn =_extendedLearn
MaxLikelihood_HammingLoss.learn =_extendedLearn
MaxLikelihood_GeneralizedHammingLoss.learn =_extendedLearn
+StructPerceptron_HammingLoss.learn =_extendedLearn
+StructPerceptron_GeneralizedHammingLoss.learn =_extendedLearn
+
+
if opengmConfig.withCplex or opengmConfig.withGurobi :
StructMaxMargin_Bundle_HammingLoss.learn = _extendedLearn
StructMaxMargin_Bundle_GeneralizedHammingLoss.learn = _extendedLearn
@@ -88,6 +92,19 @@ def gridSearchLearner(dataset, lowerBounds, upperBounds, nTestPoints):
return learner
+def structPerceptron(dataset):
+
+ if dataset.__class__.lossType == 'hamming':
+ learnerCls = StructPerceptron_HammingLoss
+ learnerParamCls = StructPerceptron_HammingLossParameter
+ elif dataset.__class__.lossType == 'generalized-hamming':
+ learnerCls = StructPerceptron_GeneralizedHammingLoss
+ learnerParamCls = StructPerceptron_GeneralizedHammingLossParameter
+
+ param = learnerParamCls()
+
+ learner = learnerCls(dataset, param)
+ return learner
def structMaxMarginLearner(dataset, regularizerWeight=1.0, minEps=1e-5, nSteps=0, epsStrategy='change', optimizer='bundle'):
diff --git a/src/interfaces/python/opengm/learning/learning.cxx b/src/interfaces/python/opengm/learning/learning.cxx
index d7333ee..8697dc2 100644
--- a/src/interfaces/python/opengm/learning/learning.cxx
+++ b/src/interfaces/python/opengm/learning/learning.cxx
@@ -38,6 +38,9 @@ namespace opengm{
template<class DATASET>
void export_max_likelihood_learner(const std::string & clsName);
+ template<class DATASET>
+ void export_struct_perceptron_learner(const std::string & clsName);
+
template<class GM_ADDER,class GM_MULT>
void export_lfunction_generator();
}
@@ -70,9 +73,17 @@ BOOST_PYTHON_MODULE_INIT(_learning) {
opengm::export_grid_search_learner<op::GmAdderHammingLossDataset>("GridSearch_HammingLoss");
opengm::export_grid_search_learner<op::GmAdderGeneralizedHammingLossDataset>("GridSearch_GeneralizedHammingLoss");
- opengm::export_grid_search_learner<op::GmAdderGeneralizedHammingLossDataset>("GridSearch_FlexibleLoss");
+ //opengm::export_grid_search_learner<op::GmAdderGeneralizedHammingLossDataset>("GridSearch_FlexibleLoss");
+
+
+ opengm::export_struct_perceptron_learner<op::GmAdderHammingLossDataset>("StructPerceptron_HammingLoss");
+ opengm::export_struct_perceptron_learner<op::GmAdderGeneralizedHammingLossDataset>("StructPerceptron_GeneralizedHammingLoss");
+ //opengm::export_struct_perceptron_learner<op::GmAdderGeneralizedHammingLossDataset>("StructPerceptron_FlexibleLoss");
+
+
+
opengm::export_max_likelihood_learner<op::GmAdderHammingLossDataset>("MaxLikelihood_HammingLoss");
opengm::export_max_likelihood_learner<op::GmAdderGeneralizedHammingLossDataset>("MaxLikelihood_GeneralizedHammingLoss");
diff --git a/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx b/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx
new file mode 100644
index 0000000..1a98a17
--- /dev/null
+++ b/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx
@@ -0,0 +1,59 @@
+#include <boost/python.hpp>
+#include <boost/python/module.hpp>
+#include <opengm/python/opengmpython.hxx>
+#include <opengm/python/converter.hxx>
+#include <opengm/python/numpyview.hxx>
+#include <opengm/learning/structured_perceptron.hxx>
+
+#define DefaultErrorFn DefaultErrorFn_TrwsExternalSPerceptron
+#include "helper.hxx"
+
+namespace bp = boost::python;
+namespace op = opengm::python;
+namespace ol = opengm::learning;
+
+namespace opengm{
+
+
+ template<class PARAM>
+ PARAM * pyStructuredPerceptronParamConstructor(
+ ){
+ PARAM * p = new PARAM();
+ return p;
+ }
+
+ template<class L >
+ L * pyStructuredPerceptronConstructor(
+ typename L::DatasetType & dataset,
+ const typename L::Parameter & param
+ ){
+ L * l = new L(dataset, param);
+ return l;
+ }
+
+ template<class DATASET>
+ void export_struct_perceptron_learner(const std::string & clsName){
+ typedef learning::StructuredPerceptron<DATASET> PyLearner;
+ typedef typename PyLearner::Parameter PyLearnerParam;
+
+ const std::string paramClsName = clsName + std::string("Parameter");
+
+
+ bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
+ .def("__init__", make_constructor(&pyStructuredPerceptronParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
+ ;
+
+ bp::class_<PyLearner>( clsName.c_str(), bp::no_init )
+ .def("__init__", make_constructor(&pyStructuredPerceptronConstructor<PyLearner> ,boost::python::default_call_policies()))
+ .def(LearnerInferenceSuite<PyLearner>())
+ ;
+ }
+
+ template void
+ export_struct_perceptron_learner<op::GmAdderHammingLossDataset> (const std::string& className);
+
+ template void
+ export_struct_perceptron_learner<op::GmAdderGeneralizedHammingLossDataset> (const std::string& className);
+}
+
+
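The binding follows the same Boost.Python recipe as the other learners in this module: expose the parameter class with a default __init__, expose the learner with bp::no_init plus a make_constructor factory, and attach inference via LearnerInferenceSuite from helper.hxx. Reduced to a standalone toy module (Foo is hypothetical, not the learner):

    #include <boost/python.hpp>
    namespace bp = boost::python;

    struct Foo {
        int x;
        explicit Foo(int x_) : x(x_) {}
    };

    // Factory registered as __init__; under the default call policies
    // Boost.Python takes ownership of the returned pointer.
    Foo* makeFoo(int x) { return new Foo(x); }

    BOOST_PYTHON_MODULE(example) {
        bp::class_<Foo>("Foo", bp::no_init)
            .def("__init__", bp::make_constructor(&makeFoo))
            .def_readonly("x", &Foo::x);
    }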
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git