[opengm] 200/386: implemented subgradient ssvm
Ghislain Vaillant
ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:37:40 UTC 2016
This is an automated email from the git hooks/post-receive script.
ghisvail-guest pushed a commit to branch debian/master
in repository opengm.
commit 5fd1a86deaf517285f7fd59e5b88de5dc2531924
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date: Sun Jan 4 23:53:08 2015 +0100
implemented subgradient ssvm
---
fubar/real_example_2.py | 6 +-
include/opengm/learning/gradient-accumulator.hxx | 25 ++-
include/opengm/learning/loss/flexibleloss.hxx | 209 +++++++++++++++++++++
include/opengm/learning/structured_perceptron.hxx | 8 +-
...uctured_perceptron.hxx => subgradient_ssvm.hxx} | 88 ++++-----
src/interfaces/python/opengm/learning/__init__.py | 9 +-
src/interfaces/python/opengm/learning/learning.cxx | 2 +-
.../python/opengm/learning/pySubgradientSSVM.cxx | 75 ++++++++
8 files changed, 362 insertions(+), 60 deletions(-)
diff --git a/fubar/real_example_2.py b/fubar/real_example_2.py
index 09b2a7b..361b4e3 100644
--- a/fubar/real_example_2.py
+++ b/fubar/real_example_2.py
@@ -129,10 +129,10 @@ upperBounds = numpy.ones(nWeights)*2.0
nTestPoints =numpy.ones(nWeights).astype('uint64')*5
# learner = learning.gridSearchLearner(dataset=dataset,lowerBounds=lowerBounds, upperBounds=upperBounds,nTestPoints=nTestPoints)
-#learner = learning.structMaxMarginLearner(dataset, 1.0, 0.001, 0)
+#learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
#learner = learning.maxLikelihoodLearner(dataset)
-learner = learning.structPerceptron(dataset, decayExponent=-0.001, learningMode='batch')
-#learner = learning.subgradientSSVM(dataset, decayExponent=-0.001, learningMode='batch')
+#learner = learning.structPerceptron(dataset, decayExponent=-0.5, learningMode='batch')
+learner = learning.subgradientSSVM(dataset, learningRate=1.0, C=100, learningMode='batch')
learner.learn(infCls=opengm.inference.QpboExternal,
diff --git a/include/opengm/learning/gradient-accumulator.hxx b/include/opengm/learning/gradient-accumulator.hxx
index 63e432b..775bf20 100644
--- a/include/opengm/learning/gradient-accumulator.hxx
+++ b/include/opengm/learning/gradient-accumulator.hxx
@@ -80,11 +80,13 @@ struct FeatureAccumulator{
- FeatureAccumulator(const size_t nW)
+ FeatureAccumulator(const size_t nW, bool add = true)
: accWeights_(nW),
gtLabel_(),
- mapLabel_(){
-
+ mapLabel_(),
+ add_(add)
+ {
+
for(size_t i=0; i<accWeights_.size(); ++i){
accWeights_[i] = 0.0;
}
@@ -124,11 +126,19 @@ struct FeatureAccumulator{
const Accessor accessorGt(begin, end, gtLabel_);
const Accessor accessorMap(begin, end, mapLabel_);
- // for test label
- accWeights_[gwi] += f.weightGradient(wi, Iter(accessorMap, 0));
- // for gt label
- accWeights_[gwi] -= f.weightGradient(wi, Iter(accessorGt, 0));
+ if(add_){
+ // for gt label
+ accWeights_[gwi] += f.weightGradient(wi, Iter(accessorGt, 0));
+ // for test label
+ accWeights_[gwi] -= f.weightGradient(wi, Iter(accessorMap, 0));
+ }
+ else{
+ // for gt label
+ accWeights_[gwi] -= f.weightGradient(wi, Iter(accessorGt, 0));
+ // for test label
+ accWeights_[gwi] += f.weightGradient(wi, Iter(accessorMap, 0));
+ }
}
}
}
@@ -156,6 +166,7 @@ struct FeatureAccumulator{
opengm::learning::Weights<double> accWeights_;
LABEL_ITER gtLabel_;
LABEL_ITER mapLabel_;
+ bool add_;
};
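The new add flag above only flips the sign of the accumulated gradient. A minimal numpy sketch of the two conventions, assuming phi_gt and phi_map stand for the weight-gradient vectors of the ground-truth and inferred labelings (illustrative names, not OpenGM API):

    import numpy as np

    def accumulate(phi_gt, phi_map, add=True):
        # add=True:  phi(gt) - phi(map)   (the default, used by the subgradient SSVM)
        # add=False: phi(map) - phi(gt)   (now passed by the structured perceptron)
        diff = phi_gt - phi_map
        return diff if add else -diff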
diff --git a/include/opengm/learning/loss/flexibleloss.hxx b/include/opengm/learning/loss/flexibleloss.hxx
new file mode 100644
index 0000000..748fa07
--- /dev/null
+++ b/include/opengm/learning/loss/flexibleloss.hxx
@@ -0,0 +1,209 @@
+#pragma once
+#ifndef OPENGM_FLEXIBLE_LOSS_HXX
+#define OPENGM_FLEXIBLE_LOSS_HXX
+
+#include "opengm/functions/explicit_function.hxx"
+#include "opengm/graphicalmodel/graphicalmodel_hdf5.hxx"
+#include "hdf5.h"
+
+namespace opengm {
+namespace learning {
+
+/**
+ * In Hamming mode, the flexible loss incurs a penalty of
+ * nodeLossMultiplier[n] * labelLossMultiplier[l] for node n taking label l;
+ * the penalty is zero when l equals the ground-truth label. One can picture
+ * the overall cost matrix as the outer product nodeLossMultiplier *
+ * labelLossMultiplier, with zeros where the node label equals the ground truth.
+ **/
+class FlexibleLoss{
+public:
+ class Parameter{
+ public:
+
+ Parameter(){
+ // default to Hamming so lossType_ is never read uninitialized
+ lossType_ = Hamming;
+ lambdaWeight = 1.0;
+ }
+ enum LossType{
+ Hamming = 0 ,
+ L1 = 1,
+ L2 = 2,
+ Partition = 3,
+ ConfMat = 4
+ };
+
+ bool operator==(const FlexibleLoss & other) const{
+ throw opengm::RuntimeError("do not call me");
+ }
+ bool operator<(const FlexibleLoss & other) const{
+ throw opengm::RuntimeError("do not call me");
+ }
+ bool operator>(const FlexibleLoss & other) const{
+ throw opengm::RuntimeError("do not call me");
+ }
+ double getNodeLossMultiplier(const size_t i) const;
+ double getLabelLossMultiplier(const size_t i) const;
+
+ double getLabelConfMatMultiplier(const size_t l, const size_t lgt)const;
+ /**
+ * serializes the parameter object to the given hdf5 group handle;
+ * a dataset "lossId" identifying the loss type is written into the
+ * group alongside the multiplier vectors
+ **/
+ void save(hid_t& groupHandle) const;
+ void load(const hid_t& groupHandle);
+ static std::size_t getLossId() { return lossId_; }
+
+
+ std::vector<double> nodeLossMultiplier_;
+ std::vector<double> labelLossMultiplier_;
+ std::vector<double> factorMultiplier_;
+ marray::Marray<double> confMat_;
+ LossType lossType_;
+ double lambdaWeight;
+
+
+ private:
+ static const std::size_t lossId_ = 16002;
+
+ };
+
+
+public:
+ FlexibleLoss(const Parameter& param = Parameter()) : param_(param){}
+
+ template<class GM, class IT1, class IT2>
+ double loss(const GM & gm, IT1 labelBegin, IT1 labelEnd, IT2 GTBegin,IT2 GTEnd) const;
+
+ template<class GM, class IT>
+ void addLoss(GM& gm, IT GTBegin) const;
+
+private:
+ Parameter param_;
+};
+
+inline double FlexibleLoss::Parameter::getNodeLossMultiplier(const size_t i) const {
+ if(i >= this->nodeLossMultiplier_.size()) {
+ return 1.;
+ }
+ return this->nodeLossMultiplier_[i];
+}
+
+inline double FlexibleLoss::Parameter::getLabelLossMultiplier(const size_t i) const {
+ if(i >= this->labelLossMultiplier_.size()) {
+ return 1.;
+ }
+ return this->labelLossMultiplier_[i];
+}
+
+inline double FlexibleLoss::Parameter::getLabelConfMatMultiplier(const size_t l, const size_t lgt)const{
+ if(l<confMat_.shape(0) && lgt<confMat_.shape(1)){
+ return confMat_(l, lgt);
+ }
+ return 1.0;
+}
+
+inline void FlexibleLoss::Parameter::save(hid_t& groupHandle) const {
+ std::vector<std::size_t> name;
+ name.push_back(this->getLossId());
+ marray::hdf5::save(groupHandle,"lossId",name);
+
+ if (this->nodeLossMultiplier_.size() > 0) {
+ marray::hdf5::save(groupHandle,"nodeLossMultiplier",this->nodeLossMultiplier_);
+ }
+ if (this->labelLossMultiplier_.size() > 0) {
+ marray::hdf5::save(groupHandle,"labelLossMultiplier",this->labelLossMultiplier_);
+ }
+}
+
+inline void FlexibleLoss::Parameter::load(const hid_t& groupHandle) {
+ if (H5Dopen(groupHandle, "nodeLossMultiplier", H5P_DEFAULT) >= 0) {
+ marray::hdf5::loadVec(groupHandle, "nodeLossMultiplier", this->nodeLossMultiplier_);
+ } else {
+ std::cout << "nodeLossMultiplier of FlexibleLoss not found, setting default values" << std::endl;
+ }
+
+ if (H5Dopen(groupHandle, "labelLossMultiplier", H5P_DEFAULT) >= 0) {
+ marray::hdf5::loadVec(groupHandle, "labelLossMultiplier", this->labelLossMultiplier_);
+ } else {
+ std::cout << "labelLossMultiplier of FlexibleLoss not found, setting default values" << std::endl;
+ }
+}
+
+template<class GM, class IT1, class IT2>
+double FlexibleLoss::loss(const GM & gm, IT1 labelBegin, const IT1 labelEnd, IT2 GTBegin, const IT2 GTEnd) const
+{
+
+
+ double loss = 0.0;
+ size_t nodeIndex = 0;
+ if(param_.lossType_ == Parameter::Hamming){
+ for(; labelBegin!= labelEnd; ++labelBegin, ++GTBegin, ++nodeIndex){
+ if(*labelBegin != *GTBegin){
+ loss += param_.getNodeLossMultiplier(nodeIndex) * param_.getLabelLossMultiplier(*labelBegin);
+ }
+ }
+ }
+ return loss;
+}
+
+template<class GM, class IT>
+void FlexibleLoss::addLoss(GM& gm, IT gt) const
+{
+ typedef typename GM::LabelType LabelType;
+ typedef typename GM::IndexType IndexType;
+ typedef typename GM::ValueType ValueType;
+ typedef opengm::ExplicitFunction<ValueType, IndexType, LabelType> ExplicitFunction;
+
+
+ if(param_.lossType_ == Parameter::Hamming){
+ for(IndexType i=0; i<gm.numberOfVariables(); ++i){
+ LabelType numL = gm.numberOfLabels(i);
+ ExplicitFunction f(&numL, &numL+1, 0);
+ for(LabelType l = 0; l < numL; ++l){
+ f(l) = - param_.getNodeLossMultiplier(i) * param_.getLabelLossMultiplier(l);
+ }
+ f(*gt) = 0;
+ ++gt;
+ gm.addFactor(gm.addFunction(f), &i, &i+1);
+ }
+ }
+ else if(param_.lossType_ == Parameter::L1 || param_.lossType_ == Parameter::L2){
+ const size_t norm = param_.lossType_ == Parameter::L1 ? 1 : 2;
+ for(IndexType i=0; i<gm.numberOfVariables(); ++i){
+ LabelType numL = gm.numberOfLabels(i);
+ ExplicitFunction f(&numL, &numL+1, 0);
+ const LabelType gtL = *gt;
+ for(LabelType l = 0; l < numL; ++l){
+ f(l) = - param_.getNodeLossMultiplier(i) * std::pow(std::abs(double(gtL) - double(l)), double(norm)) * param_.lambdaWeight;
+ }
+ f(*gt) = 0;
+ ++gt;
+ gm.addFactor(gm.addFunction(f), &i, &i+1);
+ }
+ }
+ else if(param_.lossType_ == Parameter::ConfMat){
+ for(IndexType i=0; i<gm.numberOfVariables(); ++i){
+ LabelType numL = gm.numberOfLabels(i);
+ ExplicitFunction f(&numL, &numL+1, 0);
+ const LabelType gtL = *gt;
+ for(LabelType l = 0; l < numL; ++l){
+ f(l) = - param_.getNodeLossMultiplier(i) * param_.getLabelConfMatMultiplier(l, gtL);
+ }
+ f(*gt) = 0;
+ ++gt;
+ gm.addFactor(gm.addFunction(f), &i, &i+1);
+ }
+ }
+ else if(param_.lossType_ == Parameter::Partition){
+ throw opengm::RuntimeError("Partition / Multicut Loss is not yet implemented");
+ }
+ else{
+ throw opengm::RuntimeError("INTERNAL ERROR: unknown Loss Type");
+ }
+}
+
+} // namespace learning
+} // namespace opengm
+
+#endif
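For the Hamming branch of FlexibleLoss::loss, a minimal numpy sketch, assuming out-of-range multipliers default to 1.0 as in getNodeLossMultiplier/getLabelLossMultiplier (the array-based helpers here are illustrative):

    import numpy as np

    def hamming_loss(labels, gt, node_mult, label_mult):
        labels, gt = np.asarray(labels), np.asarray(gt)
        node_mult, label_mult = np.asarray(node_mult), np.asarray(label_mult)
        wrong = labels != gt
        # penalty nodeLossMultiplier[n] * labelLossMultiplier[l] per mislabeled node
        return float(np.sum(node_mult[wrong] * label_mult[labels[wrong]]))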
diff --git a/include/opengm/learning/structured_perceptron.hxx b/include/opengm/learning/structured_perceptron.hxx
index 619dac2..8564aa2 100644
--- a/include/opengm/learning/structured_perceptron.hxx
+++ b/include/opengm/learning/structured_perceptron.hxx
@@ -73,7 +73,7 @@ namespace opengm {
return 1.0;
}
else{
- return std::pow(para_.decayT0_ + static_cast<double>(iteration_),para_.decayExponent_);
+ return std::pow(para_.decayT0_ + static_cast<double>(iteration_+1),para_.decayExponent_);
}
}
@@ -90,7 +90,7 @@ namespace opengm {
template<class DATASET>
StructuredPerceptron<DATASET>::StructuredPerceptron(DATASET& ds, const Parameter& p )
- : dataset_(ds), para_(p),iteration_(0),featureAcc_(ds.getNumberOfWeights())
+ : dataset_(ds), para_(p),iteration_(0),featureAcc_(ds.getNumberOfWeights(),false)
{
featureAcc_.resetWeights();
weights_ = opengm::learning::Weights<double>(ds.getNumberOfWeights());
@@ -198,7 +198,7 @@ namespace opengm {
//
- FeatureAcc featureAcc(nWegihts);
+ FeatureAcc featureAcc(nWegihts,false);
featureAcc.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), arg.begin());
@@ -229,7 +229,7 @@ namespace opengm {
const size_t nWegihts = dataset_.getNumberOfWeights();
for(size_t wi=0; wi<nWegihts; ++wi){
const double wOld = dataset_.getWeights().getWeight(wi);
- const double wNew = wOld +1.0*featureAcc_.getWeight(wi);
+ const double wNew = wOld + getLearningRate()*featureAcc_.getWeight(wi);
wChange += std::pow(wOld-wNew,2);
dataset_.getWeights().setWeight(wi, wNew);
}
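The iteration_+1 change likely guards the decay schedule at iteration 0: with decayT0_ = 0 and a negative decayExponent_, std::pow(0, negative) blows up. A minimal sketch of the resulting perceptron step, assuming the add=false accumulation set in the constructor above (so the accumulator holds phi(prediction) - phi(ground truth)):

    def learning_rate(iteration, t0=0.0, exponent=-0.5):
        # iteration + 1 keeps the base positive, so the rate is finite at iteration 0
        return (t0 + iteration + 1) ** exponent

    def perceptron_update(w, acc, iteration):
        # w and acc are weight vectors; acc = phi(prediction) - phi(ground truth)
        return w + learning_rate(iteration) * acc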
diff --git a/include/opengm/learning/structured_perceptron.hxx b/include/opengm/learning/subgradient_ssvm.hxx
similarity index 72%
copy from include/opengm/learning/structured_perceptron.hxx
copy to include/opengm/learning/subgradient_ssvm.hxx
index 619dac2..592f53a 100644
--- a/include/opengm/learning/structured_perceptron.hxx
+++ b/include/opengm/learning/subgradient_ssvm.hxx
@@ -1,6 +1,6 @@
#pragma once
-#ifndef OPENGM_STRUCT_PERCEPTRON_LEARNER_HXX
-#define OPENGM_STRUCT_PERCEPTRON_LEARNER_HXX
+#ifndef OPENGM_SUBGRADIENT_SSVM_LEARNER_HXX
+#define OPENGM_SUBGRADIENT_SSVM_LEARNER_HXX
#include <vector>
#include <opengm/inference/inference.hxx>
@@ -17,16 +17,17 @@ namespace opengm {
template<class DATASET>
- class StructuredPerceptron
+ class SubgradientSSVM
{
public:
typedef DATASET DatasetType;
typedef typename DATASET::GMType GMType;
+ typedef typename DATASET::GMWITHLOSS GMWITHLOSS;
typedef typename DATASET::LossType LossType;
typedef typename GMType::ValueType ValueType;
typedef typename GMType::IndexType IndexType;
typedef typename GMType::LabelType LabelType;
-
+ typedef opengm::learning::Weights<double> WeightsType;
typedef typename std::vector<LabelType>::const_iterator LabelIterator;
typedef FeatureAccumulator<GMType, LabelIterator> FeatureAcc;
@@ -43,21 +44,21 @@ namespace opengm {
eps_ = 0.00001;
maxIterations_ = 10000;
stopLoss_ = 0.0;
- decayExponent_ = 0.0;
- decayT0_ = 0.0;
+ learningRate_ = 1.0;
+ C_ = 1.0;
learningMode_ = Online;
}
double eps_;
size_t maxIterations_;
double stopLoss_;
- double decayExponent_;
- double decayT0_;
+ double learningRate_;
+ double C_;
LearningMode learningMode_;
};
- StructuredPerceptron(DATASET&, const Parameter& );
+ SubgradientSSVM(DATASET&, const Parameter& );
template<class INF>
void learn(const typename INF::Parameter& para);
@@ -82,14 +83,14 @@ namespace opengm {
double updateWeights();
DATASET& dataset_;
- opengm::learning::Weights<double> weights_;
+ WeightsType weights_;
Parameter para_;
size_t iteration_;
FeatureAcc featureAcc_;
};
template<class DATASET>
- StructuredPerceptron<DATASET>::StructuredPerceptron(DATASET& ds, const Parameter& p )
+ SubgradientSSVM<DATASET>::SubgradientSSVM(DATASET& ds, const Parameter& p )
: dataset_(ds), para_(p),iteration_(0),featureAcc_(ds.getNumberOfWeights())
{
featureAcc_.resetWeights();
@@ -100,7 +101,12 @@ namespace opengm {
template<class DATASET>
template<class INF>
- void StructuredPerceptron<DATASET>::learn(const typename INF::Parameter& para){
+ void SubgradientSSVM<DATASET>::learn(const typename INF::Parameter& para){
+
+
+ typedef typename INF:: template RebindGm<GMWITHLOSS>::type InfLossGm;
+ typedef typename InfLossGm::Parameter InfLossGmParam;
+ InfLossGmParam infLossGmParam(para);
const size_t nModels = dataset_.getNumberOfModels();
@@ -125,13 +131,13 @@ namespace opengm {
const size_t gmi = randModel();
// lock the model
dataset_.lockModel(gmi);
- const GMType & gm = dataset_.getModel(gmi);
+ const GMWITHLOSS & gmWithLoss = dataset_.getModelWithLoss(gmi);
// do inference
std::vector<LabelType> arg;
- opengm::infer<INF>(gm, para, arg);
+ opengm::infer<InfLossGm>(gmWithLoss, infLossGmParam, arg);
featureAcc_.resetWeights();
- featureAcc_.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), arg.begin());
+ featureAcc_.accumulateModelFeatures(dataset_.getModel(gmi), dataset_.getGT(gmi).begin(), arg.begin());
dataset_.unlockModel(gmi);
// update weights
@@ -151,27 +157,6 @@ namespace opengm {
featureAcc_.resetWeights();
- //std::vector< std::vector<LabelType> > args(nModels);
- //#pragma omp parallel for
- //for(size_t gmi=0; gmi<nModels; ++gmi)
- //{
- // int tid = omp_get_thread_num();
- // std::cout<<"Hello World from thread"<<tid<<"\n";
-//
- // dataset_.lockModel(gmi);
- // opengm::infer<INF>(dataset_.getModel(gmi), para, args[gmi]);
- // dataset_.unlockModel(gmi);
- //}
-//
- //for(size_t gmi=0; gmi<nModels; ++gmi)
- //{
- // dataset_.lockModel(gmi);
- // featureAcc_.accumulateModelFeatures(dataset_.getModel(gmi),
- // dataset_.getGT(gmi).begin(),
- // args[gmi].begin());
- // dataset_.unlockModel(gmi);
- //}
-
omp_lock_t modelLockUnlock;
omp_init_lock(&modelLockUnlock);
@@ -191,15 +176,15 @@ namespace opengm {
- const GMType & gm = dataset_.getModel(gmi);
+ const GMWITHLOSS & gmWithLoss = dataset_.getModelWithLoss(gmi);
//run inference
std::vector<LabelType> arg;
- opengm::infer<INF>(gm, para, arg);
+ opengm::infer<InfLossGm>(gmWithLoss, infLossGmParam, arg);
//
FeatureAcc featureAcc(nWegihts);
- featureAcc.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), arg.begin());
+ featureAcc.accumulateModelFeatures(dataset_.getModel(gmi), dataset_.getGT(gmi).begin(), arg.begin());
// acc features
@@ -224,12 +209,31 @@ namespace opengm {
template<class DATASET>
- double StructuredPerceptron<DATASET>::updateWeights(){
- double wChange = 0.0;
+ double SubgradientSSVM<DATASET>::updateWeights(){
+
const size_t nWegihts = dataset_.getNumberOfWeights();
+
+ WeightsType p(nWegihts);
+
+ if(para_.learningMode_ == Parameter::Batch){
+ for(size_t wi=0; wi<nWegihts; ++wi){
+ p[wi] = dataset_.getWeights().getWeight(wi);
+ p[wi] += para_.C_ * featureAcc_.getWeight(wi)/double(dataset_.getNumberOfModels());
+ }
+ }
+ else{
+ for(size_t wi=0; wi<nWegihts; ++wi){
+ p[wi] = dataset_.getWeights().getWeight(wi);
+ p[wi] += para_.C_ * featureAcc_.getWeight(wi);
+ }
+ }
+
+
+ double wChange = 0.0;
+
for(size_t wi=0; wi<nWegihts; ++wi){
const double wOld = dataset_.getWeights().getWeight(wi);
- const double wNew = wOld +1.0*featureAcc_.getWeight(wi);
+ const double wNew = wOld - (para_.learningRate_/double(iteration_+1))*p[wi];
wChange += std::pow(wOld-wNew,2);
dataset_.getWeights().setWeight(wi, wNew);
}
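updateWeights above is an unprojected subgradient step on the C-weighted structured hinge objective with a learningRate/(t+1) decay. A minimal numpy sketch of the batch branch, assuming acc sums phi(gt) - phi(argmin) over all models, with the argmins coming from the loss-augmented inference above (the online branch is identical except without the division by the number of models):

    import numpy as np

    def ssvm_update(w, acc, n_models, learning_rate, C, iteration):
        # subgradient: regularizer w plus the C-scaled, model-averaged feature difference
        p = w + C * acc / n_models
        # decaying step size, as in the diff
        return w - (learning_rate / (iteration + 1)) * p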
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index c0a918e..e067465 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -40,6 +40,9 @@ MaxLikelihood_GeneralizedHammingLoss.learn =_extendedLearn
StructPerceptron_HammingLoss.learn =_extendedLearn
StructPerceptron_GeneralizedHammingLoss.learn =_extendedLearn
+SubgradientSSVM_HammingLoss.learn =_extendedLearn
+SubgradientSSVM_GeneralizedHammingLoss.learn =_extendedLearn
+
if opengmConfig.withCplex or opengmConfig.withGurobi :
StructMaxMargin_Bundle_HammingLoss.learn = _extendedLearn
@@ -124,7 +127,7 @@ def structPerceptron(dataset, learningMode='online',eps=1e-5, maxIterations=1000
return learner
-def subgradientSSVM(dataset, learningMode='online',eps=1e-5, maxIterations=10000, stopLoss=0.0, decayExponent=0.0, decayT0=0.0):
+def subgradientSSVM(dataset, learningMode='batch',eps=1e-5, maxIterations=10000, stopLoss=0.0, learningRate=1.0, C=100.0):
if dataset.__class__.lossType == 'hamming':
@@ -149,8 +152,8 @@ def subgradientSSVM(dataset, learningMode='online',eps=1e-5, maxIterations=10000
param.eps = float(eps)
param.maxIterations = int(maxIterations)
param.stopLoss = float(stopLoss)
- param.decayExponent = float(decayExponent)
- param.decayT0 = float(decayT0)
+ param.learningRate = float(learningRate)
+ param.C = float(C)
param.learningMode = lm
learner = learnerCls(dataset, param)
return learner
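A minimal usage sketch of the new factory with its defaults, assuming a dataset whose lossType is 'hamming' (this mirrors the call in fubar/real_example_2.py):

    from opengm import learning

    learner = learning.subgradientSSVM(dataset, learningMode='batch',
                                       eps=1e-5, maxIterations=10000,
                                       stopLoss=0.0, learningRate=1.0, C=100.0)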
diff --git a/src/interfaces/python/opengm/learning/learning.cxx b/src/interfaces/python/opengm/learning/learning.cxx
index 05c0821..000a869 100644
--- a/src/interfaces/python/opengm/learning/learning.cxx
+++ b/src/interfaces/python/opengm/learning/learning.cxx
@@ -87,7 +87,7 @@ BOOST_PYTHON_MODULE_INIT(_learning) {
//opengm::export_struct_perceptron_learner<op::GmAdderGeneralizedHammingLossDataset>("StructPerceptron_FlexibleLoss");
opengm::export_subgradient_ssvm_learner<op::GmAdderHammingLossDataset>("SubgradientSSVM_HammingLoss");
- opengm::export_subgradient_ssvm_learner<op::GmAdderHammingLossDataset>("SubgradientSSVM_GeneralizedHammingLoss");
+ opengm::export_subgradient_ssvm_learner<op::GmAdderGeneralizedHammingLossDataset>("SubgradientSSVM_GeneralizedHammingLoss");
opengm::export_max_likelihood_learner<op::GmAdderHammingLossDataset>("MaxLikelihood_HammingLoss");
opengm::export_max_likelihood_learner<op::GmAdderGeneralizedHammingLossDataset>("MaxLikelihood_GeneralizedHammingLoss");
diff --git a/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx b/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
new file mode 100644
index 0000000..2ca92c9
--- /dev/null
+++ b/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
@@ -0,0 +1,75 @@
+#include <boost/python.hpp>
+#include <boost/python/module.hpp>
+#include <opengm/python/opengmpython.hxx>
+#include <opengm/python/converter.hxx>
+#include <opengm/python/numpyview.hxx>
+#include <opengm/learning/subgradient_ssvm.hxx>
+
+#define DefaultErrorFn DefaultErrorFn_TrwsExternalSubgradientSSVM
+#include "helper.hxx"
+
+namespace bp = boost::python;
+namespace op = opengm::python;
+namespace ol = opengm::learning;
+
+namespace opengm{
+
+
+ template<class PARAM>
+ PARAM * pySubgradientSSVMParamConstructor(
+ ){
+ PARAM * p = new PARAM();
+ return p;
+ }
+
+ template<class L >
+ L * pySubgradientSSVMConstructor(
+ typename L::DatasetType & dataset,
+ const typename L::Parameter & param
+ ){
+ L * l = new L(dataset, param);
+ return l;
+ }
+
+ template<class DATASET>
+ void export_subgradient_ssvm_learner(const std::string & clsName){
+ typedef learning::SubgradientSSVM<DATASET> PyLearner;
+ typedef typename PyLearner::Parameter PyLearnerParam;
+
+ const std::string paramClsName = clsName + std::string("Parameter");
+
+ const std::string paramEnumLearningModeName = clsName + std::string("Parameter_LearningMode");
+
+ // learner param enum
+ bp::enum_<typename PyLearnerParam::LearningMode>(paramEnumLearningModeName.c_str())
+ .value("online", PyLearnerParam::Online)
+ .value("batch", PyLearnerParam::Batch)
+ ;
+
+ // learner param
+ bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
+ .def("__init__", make_constructor(&pyStructuredPerceptronParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
+ .def_readwrite("eps", &PyLearnerParam::eps_)
+ .def_readwrite("maxIterations", &PyLearnerParam::maxIterations_)
+ .def_readwrite("stopLoss", &PyLearnerParam::stopLoss_)
+ .def_readwrite("learningRate", &PyLearnerParam::learningRate_)
+ .def_readwrite("C", &PyLearnerParam::C_)
+ .def_readwrite("learningMode", &PyLearnerParam::learningMode_)
+ ;
+
+
+ // learner
+ bp::class_<PyLearner>( clsName.c_str(), bp::no_init )
+ .def("__init__", make_constructor(&pyStructuredPerceptronConstructor<PyLearner> ,boost::python::default_call_policies()))
+ .def(LearnerInferenceSuite<PyLearner>())
+ ;
+ }
+
+ template void
+ export_subgradient_ssvm_learner<op::GmAdderHammingLossDataset> (const std::string& className);
+
+ template void
+ export_subgradient_ssvm_learner<op::GmAdderGeneralizedHammingLossDataset> (const std::string& className);
+}
+
+
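The export above makes the parameter fields writable attributes on the Python side; a minimal sketch of configuring the exported class directly, assuming the names land in opengm.learning as the factory in __init__.py suggests:

    from opengm.learning import (SubgradientSSVM_HammingLossParameter,
                                 SubgradientSSVM_HammingLossParameter_LearningMode)

    param = SubgradientSSVM_HammingLossParameter()
    param.eps = 1e-5
    param.maxIterations = 10000
    param.stopLoss = 0.0
    param.learningRate = 1.0
    param.C = 100.0
    param.learningMode = SubgradientSSVM_HammingLossParameter_LearningMode.batch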
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git