[opengm] 197/386: loss graphical model type can now be specified in the dataset but has a meaningful default templatization
Ghislain Vaillant
ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:37:39 UTC 2016
This is an automated email from the git hooks/post-receive script.
ghisvail-guest pushed a commit to branch debian/master
in repository opengm.
commit 7e04ac8e9c2958834bfe22f7ef5dfb7c256b88d9
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date: Fri Dec 26 20:03:08 2014 +0100
loss graphical model type can now be specified in the dataset but has a meaningful default templatization
---
fubar/real_example_2.py | 11 +--
include/opengm/graphicalmodel/graphicalmodel.hxx | 44 +++++++++++-
.../graphicalmodel_function_wrapper.hxx | 79 ++++++++++++++++++++
include/opengm/inference/external/trws.hxx | 17 ++++-
include/opengm/inference/icm.hxx | 2 +-
.../inference/messagepassing/messagepassing.hxx | 30 ++++++++
.../inference/messagepassing/messagepassing_bp.hxx | 11 +++
.../messagepassing/messagepassing_trbp.hxx | 9 +++
include/opengm/learning/dataset/dataset.hxx | 60 ++++++++++++----
.../opengm/learning/dataset/editabledataset.hxx | 26 +++----
include/opengm/learning/struct-max-margin.hxx | 6 +-
include/opengm/learning/structured_perceptron.hxx | 36 ++++++++--
include/opengm/utilities/metaprogramming.hxx | 83 +++++++++++++++++++---
src/interfaces/python/opengm/learning/__init__.py | 8 ++-
.../python/opengm/learning/pyStructPerceptron.cxx | 4 ++
15 files changed, 371 insertions(+), 55 deletions(-)
diff --git a/fubar/real_example_2.py b/fubar/real_example_2.py
index 8eff5d7..a80ad07 100644
--- a/fubar/real_example_2.py
+++ b/fubar/real_example_2.py
@@ -5,13 +5,13 @@ import vigra
import pylab as plt
import pylab
-nModels = 100
+nModels = 10
nLables = 2
shape = [30, 30]
numVar = shape[0]*shape[1]
-sSmooth = [1.0, 1.5, 2.0, 3.0]
-sGrad = [1.0, 1.5, 2.0, 3.0]
+sSmooth = [1.0, 1.5, 2.0, 3.0, 4.0 , 5.0]
+sGrad = [1.0, 1.5, 2.0, 3.0, 4.0, 5.0]
nUWeights = len(sSmooth) + 1
nBWeights = len(sGrad) + 1
@@ -129,9 +129,10 @@ upperBounds = numpy.ones(nWeights)*2.0
nTestPoints =numpy.ones(nWeights).astype('uint64')*5
# learner = learning.gridSearchLearner(dataset=dataset,lowerBounds=lowerBounds, upperBounds=upperBounds,nTestPoints=nTestPoints)
-#0learner = learning.structMaxMarginLearner(dataset, 1.0, 0.001, 0)
+#learner = learning.structMaxMarginLearner(dataset, 1.0, 0.001, 0)
#learner = learning.maxLikelihoodLearner(dataset)
-learner = learning.structPerceptron(dataset)
+learner = learning.structPerceptron(dataset,kappa=0.1)
+
learner.learn(infCls=opengm.inference.QpboExternal,
parameter=opengm.InfParam())
diff --git a/include/opengm/graphicalmodel/graphicalmodel.hxx b/include/opengm/graphicalmodel/graphicalmodel.hxx
index 5ec42a3..a3b2f0e 100755
--- a/include/opengm/graphicalmodel/graphicalmodel.hxx
+++ b/include/opengm/graphicalmodel/graphicalmodel.hxx
@@ -93,6 +93,12 @@ public:
GraphicalModel(const SpaceType& ,const size_t reserveFactorsPerVariable=0);
GraphicalModel& operator=(const GraphicalModel&);
+ template<class OTHER_TL>
+ GraphicalModel& operator=(
+ const GraphicalModel<T, OPERATOR, OTHER_TL, SPACE > & otherGM
+ );
+
+
const SpaceType& space() const;
IndexType numberOfVariables() const;
IndexType numberOfVariables(const IndexType) const;
@@ -169,7 +175,7 @@ public:
//}
-protected:
+//protected:
template<size_t FUNCTION_INDEX>
const std::vector<typename meta::TypeAtTypeList<FunctionTypeList, FUNCTION_INDEX>::type>& functions() const;
template<size_t FUNCTION_INDEX>
@@ -791,6 +797,42 @@ GraphicalModel<T, OPERATOR, FUNCTION_TYPE_LIST, SPACE>::operator=
return *this;
}
+
+template<class T, class OPERATOR, class FUNCTION_TYPE_LIST, class SPACE>
+template<class OTHER_TL>
+inline GraphicalModel<T, OPERATOR, FUNCTION_TYPE_LIST, SPACE>&
+GraphicalModel<T, OPERATOR, FUNCTION_TYPE_LIST, SPACE>::operator=
+(
+ const GraphicalModel<T, OPERATOR, OTHER_TL, SPACE>& gm
+) {
+
+ typedef GraphicalModel<T, OPERATOR, OTHER_TL, SPACE> OtherGm;
+ this->space_ = gm.space_;
+
+ //this->functionDataField_=gm.functionDataField_;
+
+
+
+ std::vector<int> srcFidToTarget(OtherGm::NrOfFunctionTypes,-1);
+ detail_graphical_model::CopyFunctions<0, OtherGm::NrOfFunctionTypes >::op(gm, *this,srcFidToTarget);
+
+ this->factors_.resize(gm.factors_.size());
+ this->variableFactorAdjaceny_=gm.variableFactorAdjaceny_;
+ this->factorsVis_ = gm.factorsVis_;
+ this->order_ = gm.order_;
+
+ for(size_t i = 0; i<this->factors_.size(); ++i) {
+ factors_[i].gm_=this;
+ factors_[i].functionIndex_=gm.factors_[i].functionIndex_;
+
+ int newFidFunctionId = srcFidToTarget[gm.factors_[i].functionTypeId_];
+ OPENGM_CHECK_OP(newFidFunctionId,>,-1,"INTERNAL ERROR");
+ factors_[i].functionTypeId_= newFidFunctionId;
+ factors_[i].vis_=gm.factors_[i].vis_;
+ factors_[i].vis_.assignPtr(this->factorsVis_);
+ }
+ return *this;
+}
template<class T, class OPERATOR, class FUNCTION_TYPE_LIST, class SPACE>
template<size_t FUNCTION_INDEX>
diff --git a/include/opengm/graphicalmodel/graphicalmodel_function_wrapper.hxx b/include/opengm/graphicalmodel/graphicalmodel_function_wrapper.hxx
index 77b4548..a573e92 100755
--- a/include/opengm/graphicalmodel/graphicalmodel_function_wrapper.hxx
+++ b/include/opengm/graphicalmodel/graphicalmodel_function_wrapper.hxx
@@ -39,6 +39,85 @@ template<class GRAPHICAL_MODEL> class Factor;
namespace detail_graphical_model {
+
+ template<bool IN_LIST>
+ struct MaybeCopyFunctionVector;
+
+ template<>
+ struct MaybeCopyFunctionVector<true>{
+
+ template<class FVEC, class GM_T, class SRC_FID_TO_TARGET>
+ void static op(
+ const FVEC & functionsS,
+ GM_T & gmT,
+ SRC_FID_TO_TARGET & srcFidToTarget,
+ size_t indexInSource
+ ){
+
+ typedef typename GM_T::FunctionTypeList TargetList;
+ typedef opengm::meta::GetIndexInTypeList<TargetList,typename FVEC::value_type> IndexGetter;
+
+ srcFidToTarget[indexInSource] = IndexGetter::value;
+ gmT. template functions<IndexGetter::value>() = functionsS;
+ }
+ };
+
+ template<>
+ struct MaybeCopyFunctionVector<false>{
+
+ template<class FVEC, class GM_T, class SRC_FID_TO_TARGET>
+ void static op(
+ const FVEC & functionsS,
+ GM_T & gmT,
+ SRC_FID_TO_TARGET & srcFidToTarget,
+ size_t indexInSource
+ ){
+ srcFidToTarget[indexInSource] = -1;
+ OPENGM_CHECK_OP(functionsS.size(),==,0,"incompatible functions must have zero size");
+ }
+ };
+
+
+ template<size_t I, size_t DX>
+ struct CopyFunctions{
+
+ template<class GM_S, class GM_T, class SRC_FID_TO_TARGET>
+ void static op(
+ const GM_S & gmS,
+ GM_T & gmT,
+ SRC_FID_TO_TARGET & srcFidToTarget
+ ){
+ //
+ typedef typename GM_S::FunctionTypeList SourceList;
+ typedef typename GM_T::FunctionTypeList TargetList;
+ typedef typename opengm::meta::TypeAtTypeList<SourceList, I>::type FType;
+
+ const std::vector<FType> & functions = gmS. template functions<I>();
+
+ typedef MaybeCopyFunctionVector<opengm::meta::HasTypeInTypeList<TargetList, FType>::value > CopyFVec;
+ CopyFVec::op(functions, gmT, srcFidToTarget, I);
+ // next function type
+ CopyFunctions<I+1, DX>::op(gmS,gmT,srcFidToTarget);
+ }
+ };
+ template<size_t DX>
+ struct CopyFunctions<DX,DX>{
+
+ template<class GM_S, class GM_T, class SRC_FID_TO_TARGET>
+ void static op(
+ const GM_S & gmS,
+ GM_T & gmT,
+ SRC_FID_TO_TARGET & srcFidToTarget
+ ){
+
+ }
+ };
+
+
+
+
+
+
#define OPENGM_BASIC_FUNCTION_WRAPPER_CODE_GENERATOR_MACRO( RETURN_TYPE , FUNCTION_NAME ) \
template<size_t NUMBER_OF_FUNCTIONS> \
template<class GM> \
diff --git a/include/opengm/inference/external/trws.hxx b/include/opengm/inference/external/trws.hxx
index cb894f6..2fc0c5e 100644
--- a/include/opengm/inference/external/trws.hxx
+++ b/include/opengm/inference/external/trws.hxx
@@ -60,7 +60,7 @@ namespace opengm {
///Parameter
struct Parameter {
/// possible energy types for TRWS
- enum EnergyType {VIEW, TABLES, TL1, TL2/*, WEIGHTEDTABLE*/};
+ enum EnergyType {VIEW=0, TABLES=1, TL1=2, TL2=3/*, WEIGHTEDTABLE*/};
/// number of iterations
size_t numberOfIterations_;
/// random starting message
@@ -82,11 +82,22 @@ namespace opengm {
useRandomStart_(p.useRandomStart_),
useZeroStart_(p.useZeroStart_),
doBPS_(p.doBPS_),
- energyType_(p.energyType_),
+ energyType_(),
tolerance_(p.tolerance_),
minDualChange_(p.minDualChange_)
{
-
+ if(p.energyType_==0){
+ energyType_ =VIEW;
+ }
+ else if(p.energyType_==1){
+ energyType_ =TABLES;
+ }
+ else if(p.energyType_==2){
+ energyType_ =TL1;
+ }
+ else if(p.energyType_==3){
+ energyType_ =TL2;
+ }
};
Parameter() {
diff --git a/include/opengm/inference/icm.hxx b/include/opengm/inference/icm.hxx
index 05f8655..8b49f87 100644
--- a/include/opengm/inference/icm.hxx
+++ b/include/opengm/inference/icm.hxx
@@ -62,7 +62,7 @@ public:
Parameter(
const OP & otherParameter
){
- moveType_ = otherParameter.moveType_;
+ moveType_ = otherParameter.moveType_== 0? SINGLE_VARIABLE : FACTOR;
}
MoveType moveType_;
diff --git a/include/opengm/inference/messagepassing/messagepassing.hxx b/include/opengm/inference/messagepassing/messagepassing.hxx
index d54c332..56a854b 100644
--- a/include/opengm/inference/messagepassing/messagepassing.hxx
+++ b/include/opengm/inference/messagepassing/messagepassing.hxx
@@ -64,6 +64,21 @@ public:
/// Visitor
typedef visitors::EmptyVisitor<MessagePassing<GM, ACC, UPDATE_RULES, DIST> > EmptyVisitorType;
+
+ template<class _GM>
+ struct RebindGm{
+ typedef typename UPDATE_RULES:: template RebindGm<_GM>::type UR;
+ typedef MessagePassing<_GM, ACC, UR, DIST> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef typename UPDATE_RULES:: template RebindGmAndAcc<_GM,_ACC>::type UR;
+ typedef MessagePassing<_GM, _ACC, UR, DIST> type;
+ };
+
+
+
struct Parameter {
typedef typename UPDATE_RULES::SpecialParameterType SpecialParameterType;
Parameter
@@ -82,6 +97,21 @@ public:
specialParameter_(specialParameter),
isAcyclic_(isAcyclic)
{}
+
+ template<class P>
+ Parameter
+ (
+ const P & p
+ )
+ : maximumNumberOfSteps_(p.maximumNumberOfSteps_),
+ bound_(p.bound_),
+ damping_(p.damping_),
+ inferSequential_(p.inferSequential_),
+ useNormalization_(p.useNormalization_),
+ specialParameter_(p.specialParameter_),
+ isAcyclic_(p.isAcyclic_)
+ {}
+
size_t maximumNumberOfSteps_;
ValueType bound_;
diff --git a/include/opengm/inference/messagepassing/messagepassing_bp.hxx b/include/opengm/inference/messagepassing/messagepassing_bp.hxx
index 4146588..c76d74f 100644
--- a/include/opengm/inference/messagepassing/messagepassing_bp.hxx
+++ b/include/opengm/inference/messagepassing/messagepassing_bp.hxx
@@ -87,6 +87,17 @@ namespace opengm {
typedef VariableHullBP<GM, BufferType, OperatorType, ACC> VariableHullType;
typedef meta::EmptyType SpecialParameterType;
+ template<class _GM>
+ struct RebindGm{
+ typedef BeliefPropagationUpdateRules<_GM, ACC, BUFFER> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef BeliefPropagationUpdateRules<_GM, _ACC, BUFFER> type;
+ };
+
+
template<class MP_PARAM>
static void initializeSpecialParameter(const GM& gm, MP_PARAM& mpParameter)
{}
diff --git a/include/opengm/inference/messagepassing/messagepassing_trbp.hxx b/include/opengm/inference/messagepassing/messagepassing_trbp.hxx
index 92b8de7..9d16fbb 100644
--- a/include/opengm/inference/messagepassing/messagepassing_trbp.hxx
+++ b/include/opengm/inference/messagepassing/messagepassing_trbp.hxx
@@ -82,6 +82,15 @@ namespace opengm {
typedef FactorHullTRBP<GM, BUFFER, OperatorType, ACC> FactorHullType;
typedef VariableHullTRBP<GM, BUFFER, OperatorType, ACC> VariableHullType;
typedef std::vector<ValueType> SpecialParameterType;
+ template<class _GM>
+ struct RebindGm{
+ typedef TrbpUpdateRules<_GM, ACC, BUFFER> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef TrbpUpdateRules<_GM, _ACC, BUFFER> type;
+ };
template<class MP_PARAM>
static void initializeSpecialParameter(const GM& gm,MP_PARAM& mpParameter) {
diff --git a/include/opengm/learning/dataset/dataset.hxx b/include/opengm/learning/dataset/dataset.hxx
index 0ef892e..a830cbd 100644
--- a/include/opengm/learning/dataset/dataset.hxx
+++ b/include/opengm/learning/dataset/dataset.hxx
@@ -6,16 +6,50 @@
#include <cstdlib>
#include "../../graphicalmodel/weights.hxx"
+#include "../../functions/unary_loss_function.hxx"
#include "../loss/noloss.hxx"
namespace opengm {
namespace datasets{
-
- template<class GM, class LOSS=opengm::learning::NoLoss>
+
+ template<class GM>
+ struct DefaultLossGm{
+
+ // make the graphical model with loss
+ typedef typename GM::SpaceType SpaceType;
+ typedef typename GM::ValueType ValueType;
+ typedef typename GM::IndexType IndexType;
+ typedef typename GM::LabelType LabelType;
+ typedef typename GM::OperatorType OperatorType;
+ typedef typename GM::FunctionTypeList OrgFunctionTypeList;
+
+ // extend the typelist
+ typedef typename opengm::meta::TypeListGenerator<
+ opengm::ExplicitFunction<ValueType,IndexType,LabelType>,
+ opengm::UnaryLossFunction<ValueType,IndexType,LabelType>
+ >::type LossOnlyFunctionTypeList;
+
+ typedef typename opengm::meta::MergeTypeListsNoDuplicates<
+ OrgFunctionTypeList,LossOnlyFunctionTypeList
+ >::type CombinedList;
+ // loss graphical model type
+
+ typedef GraphicalModel<ValueType, OperatorType, CombinedList, SpaceType> type;
+ };
+
+ template<class GM, class LOSS=opengm::learning::NoLoss, class LOSS_GM = DefaultLossGm<GM> >
class Dataset{
public:
typedef GM GMType;
- typedef GM GMWITHLOSS;
+
+ // generate the gm with loss here atm (THIS IS WRONG)
+ typedef typename opengm::meta::EvalIf<
+ opengm::meta::Compare<LOSS_GM, DefaultLossGm<GM> >::value,
+ DefaultLossGm<GM>,
+ meta::Self<LOSS_GM>
+ >::type GMWITHLOSS;
+
+ //typedef GM GMWITHLOSS;
typedef LOSS LossType;
typedef typename LOSS::Parameter LossParameterType;
typedef typename GM::ValueType ValueType;
@@ -61,8 +95,8 @@ namespace opengm {
};
- template<class GM, class LOSS>
- Dataset<GM, LOSS>::Dataset(size_t numInstances)
+ template<class GM, class LOSS, class LOSS_GM>
+ Dataset<GM, LOSS, LOSS_GM>::Dataset(size_t numInstances)
: count_(std::vector<size_t>(numInstances)),
isCached_(std::vector<bool>(numInstances)),
gms_(std::vector<GM>(numInstances)),
@@ -73,9 +107,9 @@ namespace opengm {
{
}
- template<class GM, class LOSS>
+ template<class GM, class LOSS, class LOSS_GM>
template<class INF>
- typename GM::ValueType Dataset<GM, LOSS>::getTotalLoss(const typename INF::Parameter& para) const {
+ typename GM::ValueType Dataset<GM, LOSS, LOSS_GM>::getTotalLoss(const typename INF::Parameter& para) const {
ValueType sum=0;
for(size_t i=0; i<this->getNumberOfModels(); ++i) {
sum += this->getLoss<INF>(para, i);
@@ -83,9 +117,9 @@ namespace opengm {
return sum;
}
- template<class GM, class LOSS>
+ template<class GM, class LOSS, class LOSS_GM>
template<class INF>
- typename GM::ValueType Dataset<GM, LOSS>::getLoss(const typename INF::Parameter& para, const size_t i) const {
+ typename GM::ValueType Dataset<GM, LOSS, LOSS_GM>::getLoss(const typename INF::Parameter& para, const size_t i) const {
LOSS lossFunction(lossParams_[i]);
const GM& gm = this->getModel(i);
const std::vector<typename INF::LabelType>& gt = this->getGT(i);
@@ -99,8 +133,8 @@ namespace opengm {
}
- template<class GM, class LOSS>
- void Dataset<GM, LOSS>::buildModelWithLoss(size_t i){
+ template<class GM, class LOSS, class LOSS_GM>
+ void Dataset<GM, LOSS, LOSS_GM>::buildModelWithLoss(size_t i){
OPENGM_ASSERT_OP(i, <, lossParams_.size());
OPENGM_ASSERT_OP(i, <, gmsWithLoss_.size());
OPENGM_ASSERT_OP(i, <, gms_.size());
@@ -114,8 +148,8 @@ namespace opengm {
}
/*
- template<class GM, class LOSS>
- void Dataset<GM, LOSS>::loadAll(std::string datasetpath,std::string prefix){
+ template<class GM, class LOSS, class LOSS_GM>
+ void Dataset<GM, LOSS, LOSS_GM>::loadAll(std::string datasetpath,std::string prefix){
//Load Header
std::stringstream hss;
diff --git a/include/opengm/learning/dataset/editabledataset.hxx b/include/opengm/learning/dataset/editabledataset.hxx
index 73290a0..9ccb505 100644
--- a/include/opengm/learning/dataset/editabledataset.hxx
+++ b/include/opengm/learning/dataset/editabledataset.hxx
@@ -25,11 +25,11 @@ namespace opengm {
// }
// };
- template<class GM, class LOSS>
- class EditableDataset : public Dataset<GM, LOSS>{
+ template<class GM, class LOSS, class LOSS_GM = DefaultLossGm<GM> >
+ class EditableDataset : public Dataset<GM, LOSS, LOSS_GM>{
public:
typedef GM GMType;
- typedef GM GMWITHLOSS;
+ typedef typename Dataset<GM, LOSS, LOSS_GM>::GMWITHLOSS GMWITHLOSS;
typedef LOSS LossType;
typedef typename LOSS::Parameter LossParameterType;
typedef typename GM::ValueType ValueType;
@@ -47,8 +47,8 @@ namespace opengm {
void setWeights(Weights& w);
};
- template<class GM, class LOSS>
- EditableDataset<GM, LOSS>::EditableDataset(std::vector<GM>& gms,
+ template<class GM, class LOSS, class LOSS_GM>
+ EditableDataset<GM, LOSS, LOSS_GM>::EditableDataset(std::vector<GM>& gms,
std::vector<GTVector >& gts,
std::vector<LossParameterType>& lossParams)
: Dataset<GM, LOSS>(gms.size())
@@ -59,8 +59,8 @@ namespace opengm {
}
}
- template<class GM, class LOSS>
- void EditableDataset<GM, LOSS>::setInstance(const size_t i, const GM& gm, const GTVector& gt, const LossParameterType& p) {
+ template<class GM, class LOSS, class LOSS_GM>
+ void EditableDataset<GM, LOSS, LOSS_GM>::setInstance(const size_t i, const GM& gm, const GTVector& gt, const LossParameterType& p) {
OPENGM_CHECK_OP(i, <, this->gms_.size(),"");
OPENGM_CHECK_OP(i, <, this->gts_.size(),"");
OPENGM_CHECK_OP(i, <, this->lossParams_.size(),"");
@@ -73,15 +73,15 @@ namespace opengm {
//std::cout<<"build model with loss DONE\n";
}
- template<class GM, class LOSS>
- void EditableDataset<GM, LOSS>::setGT(const size_t i, const GTVector& gt) {
+ template<class GM, class LOSS, class LOSS_GM>
+ void EditableDataset<GM, LOSS, LOSS_GM>::setGT(const size_t i, const GTVector& gt) {
OPENGM_CHECK_OP(i, <, this->gts_.size(),"");
this->gts_[i] = gt;
this->buildModelWithLoss(i);
}
- template<class GM, class LOSS>
- void EditableDataset<GM, LOSS>::pushBackInstance(const GM& gm, const GTVector& gt, const LossParameterType& p) {
+ template<class GM, class LOSS, class LOSS_GM>
+ void EditableDataset<GM, LOSS, LOSS_GM>::pushBackInstance(const GM& gm, const GTVector& gt, const LossParameterType& p) {
this->gms_.push_back(gm);
this->gts_.push_back(gt);
this->lossParams_.push_back(p);
@@ -94,8 +94,8 @@ namespace opengm {
OPENGM_CHECK_OP(this->gms_.size(), ==, this->gmsWithLoss_.size(),"");
}
- template<class GM, class LOSS>
- void EditableDataset<GM, LOSS>::setWeights(Weights& w) {
+ template<class GM, class LOSS, class LOSS_GM>
+ void EditableDataset<GM, LOSS, LOSS_GM>::setWeights(Weights& w) {
this->weights_ = w;
// LinkWeights<Weights> LinkFunctor(w);
// for(size_t i=0; i<this->gms_.size(); ++i){
diff --git a/include/opengm/learning/struct-max-margin.hxx b/include/opengm/learning/struct-max-margin.hxx
index 41b2bb8..b670866 100644
--- a/include/opengm/learning/struct-max-margin.hxx
+++ b/include/opengm/learning/struct-max-margin.hxx
@@ -20,6 +20,7 @@ public:
typedef O OptimizerType;
typedef typename DatasetType::GMType GMType;
+ typedef typename DatasetType::GMWITHLOSS GMWITHLOSS;
typedef typename DatasetType::ValueType ValueType;
typedef typename DatasetType::Weights Weights;
@@ -156,7 +157,10 @@ template <typename InferenceType>
void
StructMaxMargin<DS, O>::learn(const typename InferenceType::Parameter& infParams) {
- Oracle<InferenceType> oracle(_dataset, infParams);
+ typedef typename InferenceType:: template RebindGm<GMWITHLOSS>::type InfType;
+ typedef typename InfType::Parameter InfTypeParam;
+ InfTypeParam infTypeParam(infParams);
+ Oracle<InfType> oracle(_dataset, infTypeParam);
_weights = _dataset.getWeights();
diff --git a/include/opengm/learning/structured_perceptron.hxx b/include/opengm/learning/structured_perceptron.hxx
index 7ab3fc5..6337df8 100644
--- a/include/opengm/learning/structured_perceptron.hxx
+++ b/include/opengm/learning/structured_perceptron.hxx
@@ -90,7 +90,17 @@ namespace opengm {
class Parameter{
public:
- Parameter(){;}
+ Parameter(){
+ eps_ = 0.00001;
+ maxIterations_ = 0;
+ stopLoss_ = 0.0;
+ kappa_ = 0.1;
+ }
+
+ double eps_;
+ size_t maxIterations_;
+ double stopLoss_;
+ double kappa_;
};
@@ -137,14 +147,22 @@ namespace opengm {
FeatureAcc featureAcc(nWegihts);
- bool doLearning = true;
size_t iteration = 0 ;
- while(doLearning){
+ while(true){
+ if(para_.maxIterations_!=0 && iteration>para_.maxIterations_){
+ std::cout<<"reached max iteration"<<"\n";
+ break;
+ }
// accumulate features
double currentLoss = this-> template accumulateFeatures<INF, FeatureAcc>(para, featureAcc);
- std::cout<<++iteration<<" loss "<<currentLoss<<"\n";
+
+
+ if(currentLoss < para_.stopLoss_){
+ std::cout<<"reached stopLoss"<<"\n";
+ break;
+ }
//if(currentLoss==0){
// doLearning = false;
@@ -154,18 +172,22 @@ namespace opengm {
double wChange = 0.0;
// update weights
for(size_t wi=0; wi<nWegihts; ++wi){
- const double learningRate = 1.0 /( 100.0*std::sqrt(1.0 + iteration));
+ const double learningRate = 1.0 /((1.0/para_.kappa_)*std::sqrt(1.0 + iteration));
const double wOld = dataset_.getWeights().getWeight(wi);
const double wNew = wOld + learningRate*featureAcc.fDiff(wi);
wChange += std::pow(wOld-wNew,2);
dataset_.getWeights().setWeight(wi, wNew);
}
- std::cout<<" wChange"<<wChange<<"\n";
+ ++iteration;
+ if(iteration % 25 ==0)
+ std::cout<<iteration<<" loss "<<currentLoss<<" dw "<<wChange<<"\n";
- if(wChange <= 0.000001 ){
+ if(wChange <= para_.eps_ ){
+ std::cout<<"converged"<<"\n";
break;
}
}
+ weights_ = dataset_.getWeights();
}
template<class DATASET>
diff --git a/include/opengm/utilities/metaprogramming.hxx b/include/opengm/utilities/metaprogramming.hxx
index 4ee2012..808056f 100644
--- a/include/opengm/utilities/metaprogramming.hxx
+++ b/include/opengm/utilities/metaprogramming.hxx
@@ -658,7 +658,16 @@ namespace opengm {
};
typedef HasTypeInTypeList< TTAIL,TypeToFind> type;
};
-
+
+ /// metaprogramming has type in typelist metafunction
+ template<class THEAD,class TTAIL>
+ struct HasTypeInTypeList<meta::TypeList<THEAD,TTAIL>,THEAD > : meta::TrueCase{
+ };
+ /// metaprogramming has type in typelist metafunction
+ template<class TypeToFindx>
+ struct HasTypeInTypeList<meta::ListEnd,TypeToFindx> : meta::FalseCase{
+ };
+
/// metaprogramming find type with a certain size in typelist metafunction
template<class TL,class TSL,size_t SIZE,class NOT_FOUND>
struct FindSizedType;
@@ -695,14 +704,70 @@ namespace opengm {
{
typedef OTHER_TL type;
};
- /// metaprogramming has type in typelist metafunction
- template<class THEAD,class TTAIL>
- struct HasTypeInTypeList<meta::TypeList<THEAD,TTAIL>,THEAD > : meta::TrueCase{
- };
- /// metaprogramming has type in typelist metafunction
- template<class TypeToFindx>
- struct HasTypeInTypeList<meta::ListEnd,TypeToFindx> : meta::FalseCase{
- };
+
+
+
+
+
+ template<class TL, class RES_TL>
+ struct RemoveDuplicates;
+
+
+
+ // entry poit
+ template<class TL>
+ struct RemoveDuplicates<TL, meta::ListEnd>{
+
+
+ // get the first type from tl
+ typedef typename TL::HeadType FirstEntry;
+ // rest of type list
+ typedef typename TL::TailType RestOfList;
+
+ typedef typename RemoveDuplicates<
+ RestOfList,
+ meta::TypeList<FirstEntry, meta::ListEnd>
+ >::type type;
+ };
+
+
+
+ template<class RES_TL>
+ struct RemoveDuplicates<meta::ListEnd, RES_TL>{
+ typedef RES_TL type;
+ };
+
+ template<class TL, class RES_TL>
+ struct RemoveDuplicates{
+
+ // get the first type from tl
+ typedef typename TL::HeadType FirstEntry;
+ // rest of type list
+ typedef typename TL::TailType RestOfList;
+
+
+ typedef typename meta::EvalIf<
+ meta::HasTypeInTypeList<RES_TL, FirstEntry>::value,
+ meta::Self<RES_TL>,
+ meta::BackInsert<RES_TL, FirstEntry>
+ >::type ResultTypeList;
+
+ typedef typename RemoveDuplicates<
+ RestOfList,
+ ResultTypeList
+ >::type type;
+ };
+
+
+
+ template<class TL,class OTHER_TL>
+ struct MergeTypeListsNoDuplicates{
+ typedef typename MergeTypeLists<TL, OTHER_TL>::type WithDuplicates;
+ typedef typename RemoveDuplicates<WithDuplicates, ListEnd>::type type;
+ };
+
+
+
/// metaprogramming inserts a type in typelist or move to end metafunction
///
/// back inserts a type in a typelist. If the type has been in the typelist
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index 91c087e..00ef203 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -92,7 +92,8 @@ def gridSearchLearner(dataset, lowerBounds, upperBounds, nTestPoints):
return learner
-def structPerceptron(dataset):
+def structPerceptron(dataset, eps=1e-5, maxIterations=0, stopLoss=0.0, kappa=0.1):
+
if dataset.__class__.lossType == 'hamming':
learnerCls = StructPerceptron_HammingLoss
@@ -102,7 +103,10 @@ def structPerceptron(dataset):
learnerParamCls = StructPerceptron_GeneralizedHammingLoss
param = learnerParamCls()
-
+ param.eps = eps
+ param.maxIterations
+ param.stopLoss = stopLoss
+ param.kappa = kappa
learner = learnerCls(dataset, param)
return learner
diff --git a/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx b/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx
index 1a98a17..f9a2f06 100644
--- a/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx
+++ b/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx
@@ -41,6 +41,10 @@ namespace opengm{
bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
.def("__init__", make_constructor(&pyStructuredPerceptronParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
+ .def_readwrite("eps", &PyLearnerParam::eps_)
+ .def_readwrite("maxIterations", &PyLearnerParam::maxIterations_)
+ .def_readwrite("stopLoss", &PyLearnerParam::stopLoss_)
+ .def_readwrite("kappa", &PyLearnerParam::kappa_)
;
bp::class_<PyLearner>( clsName.c_str(), bp::no_init )
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git
More information about the debian-science-commits
mailing list