[opengm] 320/386: Merge branch 'master' of https://github.com/opengm/opengm into structured_learning_2
Ghislain Vaillant
ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:38:20 UTC 2016
This is an automated email from the git hooks/post-receive script.
ghisvail-guest pushed a commit to branch debian/master
in repository opengm.
commit ddd1c0c9d8a23009dbb97c24ff8bf2041a1c50ee
Merge: 4964326 e3408d0
Author: Janez Ales <janez.ales at iwr.uni-heidelberg.de>
Date: Thu Mar 3 12:38:25 2016 +0100
Merge branch 'master' of https://github.com/opengm/opengm into structured_learning_2
Conflicts:
include/opengm/inference/fusion_based_inf.hxx
include/opengm/inference/multicut.hxx
CMakeLists.txt | 76 +-
ChangeLog.txt | 48 +
Doxyfile.in | 2 +-
README.md | 23 +
include/opengm/functions/accumulated_view.hxx | 159 ++
include/opengm/functions/explicit_function.hxx | 29 +
include/opengm/functions/pottsg.hxx | 163 +-
include/opengm/functions/view.hxx | 2 +-
include/opengm/graphicalmodel/graphicalmodel.hxx | 6 +-
.../modelgenerators/syntheticmodelgenerator.hxx | 15 +-
include/opengm/inference/astar.hxx | 2 +-
.../fusion_move/high_level_fusion_mover.hxx | 429 ++++
.../fusion_move/permutable_label_fusion_mover.hxx | 1004 ++++++++
.../opengm/inference/auxiliary/planar_graph.hxx | 662 +++++
.../opengm/inference/auxiliary/planar_maxcut.hxx | 67 +-
.../inference/auxiliary/planar_maxcut_graph.hxx | 4 +-
.../proposal_generator/multi_label_proposals.hxx | 177 ++
include/opengm/inference/cgc.hxx | 730 ++++++
.../inference/cgc/generate_starting_point.hxx | 56 +
include/opengm/inference/cgc/submodel2.hxx | 998 ++++++++
include/opengm/inference/cgc/visitors.hxx | 62 +
include/opengm/inference/dmc.hxx | 312 +++
include/opengm/inference/external/mplp.hxx | 3 +-
include/opengm/inference/external/trws.hxx | 56 +-
include/opengm/inference/fusion_based_inf.hxx | 2524 +++++++++++---------
.../opengm/inference/intersection_based_inf.hxx | 1535 ++++++++++++
include/opengm/inference/lpcplex.hxx | 1 +
include/opengm/inference/multicut.hxx | 203 +-
include/opengm/operations/minimizer.hxx | 2 +-
include/opengm/unittests/blackboxtester.hxx | 27 +-
.../unittests/blackboxtests/blackboxtestgrid.hxx | 11 +-
include/opengm/unittests/test.hxx | 2 +-
include/opengm/utilities/accumulation.hxx | 2 +-
include/opengm/utilities/canonical_view.hxx | 323 +++
include/opengm/utilities/meminfo.hxx | 3 +-
include/opengm/utilities/metaprogramming.hxx | 16 +
include/opengm/utilities/partitions.hxx | 147 ++
include/opengm/utilities/tribool.hxx | 16 +-
src/converter/CMakeLists.txt | 15 +-
src/converter/matching2opengm-N2N.cxx | 151 ++
src/converter/matching2opengm.cxx | 516 ++++
src/external/patches/IBFS/ibfs-new.patch | 20 +
src/external/patches/IBFS/patchIBFS.sh | 22 +-
src/external/patches/Planarity/patchPlanarity.sh | 14 +-
src/external/patches/Planarity/planarity.patch | 27 +
src/interfaces/commandline/double/CMakeLists.txt | 6 +
.../commandline/double/opengm_min_sum.cxx | 21 +-
src/interfaces/common/caller/cgc_caller.hxx | 83 +
.../common/caller/intersection_based_caller.hxx | 246 ++
src/interfaces/common/caller/multicut_caller.hxx | 1 +
.../matlab/opengm/m_files/model/openGMModel.m | 14 +-
.../matlab/opengm/mex-src/CMakeLists.txt | 12 +-
.../matlab/opengm/mex-src/model/evaluate.cpp | 72 +
.../matlab/opengm/mex-src/model/getPottsModel.cpp | 30 +-
.../opengm/mex-src/model/setMulticutModel.cpp | 64 +
src/interfaces/python/CMakeLists.txt | 33 +-
src/interfaces/python/examples/freelena.bmp | Bin 0 -> 786554 bytes
.../python/examples/inference_fusion_based.py | 10 +
src/interfaces/python/examples/lena.bmp | Bin 786486 -> 0 bytes
src/interfaces/python/examples/mrf/denoise.py | 4 +-
src/interfaces/python/examples/test_fancy_stuff.py | 107 +
src/interfaces/python/opengm/__init__.py | 40 +-
src/interfaces/python/opengm/hdf5/CMakeLists.txt | 3 -
.../python/opengm/inference/CMakeLists.txt | 86 +-
.../python/opengm/inference/inf_def_visitor.hxx | 2 +-
.../python/opengm/inference/inference.cpp | 16 +
.../python/opengm/inference/param/cgc_param.hxx | 69 +
.../opengm/inference/param/fusion_based_param.hxx | 53 +-
.../inference/param/intersection_based_param.hxx | 306 +++
.../opengm/inference/param/multicut_param.hxx | 8 +-
src/interfaces/python/opengm/inference/pyCgc.cxx | 175 ++
src/interfaces/python/opengm/inference/pyCgc.hxx | 5 +
.../python/opengm/inference/pyFusionBased.cxx | 61 +-
.../opengm/inference/pyIntersectionBased.cxx | 130 +
.../opengm/inference/pyIntersectionBased.hxx | 4 +
.../python/opengm/opengmcore/CMakeLists.txt | 3 -
.../python/opengm/opengmcore/__init__.py | 2 +
.../python/opengm/opengmcore/function_injector.py | 2 +-
src/tutorials/c++/applications/CMakeLists.txt | 2 +-
src/tutorials/matlab/applications/binaryNDSeg.m | 57 +
src/tutorials/matlab/demo/demo1.m | 5 +
src/tutorials/matlab/demo/demo2.m | 67 +
src/tutorials/matlab/demo/demo3.m | 43 +
src/tutorials/matlab/demo/demo4.m | 52 +
src/tutorials/matlab/demo/denoise_fast.m | 41 +
src/tutorials/matlab/demo/setup.m | 5 +
src/tutorials/python/demo/demo1.py | 56 +
src/tutorials/python/demo/demo2.py | 56 +
src/tutorials/python/demo/demo3.py | 50 +
src/tutorials/python/demo/demo4.py | 62 +
src/tutorials/python/demo/demo5.py | 92 +
src/unittest/CMakeLists.txt | 6 +
src/unittest/inference/CMakeLists.txt | 20 +
src/unittest/inference/test_cgc.cxx | 139 ++
src/unittest/inference/test_graphcut.cxx | 17 +-
src/unittest/inference/test_ibfs.cxx | 39 +
src/unittest/inference/test_planar_maxcut.cxx | 88 +-
src/unittest/inference/test_trws.cxx | 8 +
src/unittest/test_canonicalview.cxx | 133 ++
src/unittest/test_functions.cxx | 36 +-
src/unittest/test_graphicalmodeldecomposer.cxx | 117 +-
src/unittest/test_memoryinfo.cxx | 22 +-
src/unittest/test_operations.cxx | 54 +-
src/unittest/test_partitions.cxx | 60 +
104 files changed, 11994 insertions(+), 1663 deletions(-)
diff --cc include/opengm/inference/external/trws.hxx
index 2fc0c5e,69ce500..3be5757
--- a/include/opengm/inference/external/trws.hxx
+++ b/include/opengm/inference/external/trws.hxx
@@@ -75,31 -63,9 +75,33 @@@ namespace opengm
double tolerance_;
/// TRWS termintas if fabs(bound(t)-bound(t+1)) < minDualChange_
double minDualChange_;
+ /// Calculate MinMarginals
+ bool calculateMinMarginals_;
/// \brief Constructor
+ template<class P>
+ Parameter(const P & p)
+ : numberOfIterations_(p.numberOfIterations_),
+ useRandomStart_(p.useRandomStart_),
+ useZeroStart_(p.useZeroStart_),
+ doBPS_(p.doBPS_),
+ energyType_(),
+ tolerance_(p.tolerance_),
+ minDualChange_(p.minDualChange_)
+ {
+ if(p.energyType_==0){
+ energyType_ =VIEW;
+ }
+ else if(p.energyType_==1){
+ energyType_ =TABLES;
+ }
+ else if(p.energyType_==2){
+ energyType_ =TL1;
+ }
+ else if(p.energyType_==3){
+ energyType_ =TL2;
+ }
+ };
+
Parameter() {
numberOfIterations_ = 1000;
useRandomStart_ = false;
diff --cc include/opengm/inference/fusion_based_inf.hxx
index 5d94024,d4f2a1c..1331c31
--- a/include/opengm/inference/fusion_based_inf.hxx
+++ b/include/opengm/inference/fusion_based_inf.hxx
@@@ -53,1279 -53,873 +53,1476 @@@ namespace openg
- template<class GM, class ACC>
- class AutoTunedSmoothing{
- public:
++//<<<<<<< HEAD
++//template<class GM, class ACC>
++//class AutoTunedSmoothing{
++//public:
+
- template<class _GM>
- struct RebindGm{
- typedef AutoTunedSmoothing<_GM, ACC> type;
- };
++// template<class _GM>
++// struct RebindGm{
++// typedef AutoTunedSmoothing<_GM, ACC> type;
++// };
+
- template<class _GM,class _ACC>
- struct RebindGmAndAcc{
- typedef AutoTunedSmoothing<_GM, _ACC > type;
- };
++// template<class _GM,class _ACC>
++// struct RebindGmAndAcc{
++// typedef AutoTunedSmoothing<_GM, _ACC > type;
++// };
+
+
- typedef ACC AccumulationType;
- typedef GM GraphicalModelType;
- OPENGM_GM_TYPE_TYPEDEFS;
- struct Parameter
- {
- Parameter(){}
++// typedef ACC AccumulationType;
++// typedef GM GraphicalModelType;
++// OPENGM_GM_TYPE_TYPEDEFS;
++// struct Parameter
++// {
++// Parameter(){}
+
- template<class P>
- Parameter(const P & p){
++// template<class P>
++// Parameter(const P & p){
+
- }
- };
++// }
++// };
++//=======
++//>>>>>>> e3408d084b219dce69a515117c3c1253e3cb5b7d
- AutoTunedSmoothing(const GM & gm, const Parameter & param)
- :
- gm_(gm),
- param_(param),
- unaries_(),
- hasUnaries_(gm.numberOfVariables(), false)
- {
- size_t shape[2] = {gm_.numberOfVariables(), gm_.maxNumberOfLabels()};
- hasUnaries_.resize(shape, shape+2,ACC:: template neutral<ValueType>());
- for(IndexType vi=0; vi<gm_.numberOfVariables(); ++vi){
- const IndexType nFac = gm_.numberOfFactors(vi);
- for(IndexType f=0; f<nFac; ++f){
- const IndexType fi = gm_.factorOfVariable(vi, f);
- if(gm_[fi].numberOfVariables()==1){
- hasUnaries_[vi]=true;
- }
- }
- }
- }
+ namespace proposal_gen{
- private:
- const GM & gm_;
- Parameter param_;
- marray::Marray<ValueType> unaries_;
- std::vector<unsigned char> hasUnaries_;
- };
- template<class GM, class ACC>
- class AlphaExpansionGen
- {
- public:
++//<<<<<<< HEAD
++//template<class GM, class ACC>
++//class AlphaExpansionGen
++//{
++//public:
+
- template<class _GM>
- struct RebindGm{
- typedef AlphaExpansionGen<_GM, ACC> type;
- };
++// template<class _GM>
++// struct RebindGm{
++// typedef AlphaExpansionGen<_GM, ACC> type;
++// };
+
- template<class _GM,class _ACC>
- struct RebindGmAndAcc{
- typedef AlphaExpansionGen<_GM, _ACC > type;
- };
++// template<class _GM,class _ACC>
++// struct RebindGmAndAcc{
++// typedef AlphaExpansionGen<_GM, _ACC > type;
++// };
+
- typedef ACC AccumulationType;
- typedef GM GraphicalModelType;
- OPENGM_GM_TYPE_TYPEDEFS;
- struct Parameter
- {
- Parameter(){}
- template<class P>
- Parameter(const P & p){
++// typedef ACC AccumulationType;
++// typedef GM GraphicalModelType;
++// OPENGM_GM_TYPE_TYPEDEFS;
++// struct Parameter
++// {
++// Parameter(){}
++// template<class P>
++// Parameter(const P & p){
+
- }
- };
- AlphaExpansionGen(const GM &gm, const Parameter ¶m)
- : gm_(gm),
- param_(param),
- currentAlpha_(0)
- {
- maxLabel_ =0;
- for(size_t i=0; i<gm.numberOfVariables();++i){
- if(gm.numberOfLabels(i)>maxLabel_){
- maxLabel_ = gm.numberOfLabels(i);
- }
- }
- }
- void reset()
- {
- currentAlpha_ = 0;
- }
++// }
++// };
++// AlphaExpansionGen(const GM &gm, const Parameter ¶m)
++// : gm_(gm),
++// param_(param),
++// currentAlpha_(0)
++// {
++// maxLabel_ =0;
++// for(size_t i=0; i<gm.numberOfVariables();++i){
++// if(gm.numberOfLabels(i)>maxLabel_){
++// maxLabel_ = gm.numberOfLabels(i);
++// }
++// }
++// }
++// void reset()
++// {
++// currentAlpha_ = 0;
++// }
+
- size_t defaultNumStopIt() {return maxLabel_;}
++// size_t defaultNumStopIt() {return maxLabel_;}
+
- void getProposal(const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
- {
- for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi)
- {
- if (gm_.numberOfLabels(vi) > currentAlpha_ )
- {
- proposal[vi] = currentAlpha_;
- }
- else
- {
- proposal[vi] = current[vi];
- }
- }
- ++currentAlpha_;
- if(currentAlpha_>=maxLabel_){
- currentAlpha_ = 0;
- }
- }
- LabelType currentAlpha(){return currentAlpha_;}
- private:
- const GM &gm_;
- Parameter param_;
- LabelType maxLabel_;
- LabelType currentAlpha_;
- };
++// void getProposal(const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
++//=======
-
-
- template<class GM, class ACC>
- class MJumpUpDownGen
- {
- public:
- typedef ACC AccumulationType;
- typedef GM GraphicalModelType;
- OPENGM_GM_TYPE_TYPEDEFS;
-
- template<class _GM>
- struct RebindGm{
- typedef MJumpUpDownGen<_GM, ACC> type;
- };
-
- template<class _GM,class _ACC>
- struct RebindGmAndAcc{
- typedef MJumpUpDownGen<_GM, _ACC > type;
- };
-
- struct Parameter
+ template<class GM, class ACC>
+ class AlphaExpansionGen
++//>>>>>>> e3408d084b219dce69a515117c3c1253e3cb5b7d
{
- Parameter(
- const std::string startDirection = std::string("up")
- )
- : startDirection_(startDirection)
+ public:
+ typedef ACC AccumulationType;
+ typedef GM GraphicalModelType;
+ OPENGM_GM_TYPE_TYPEDEFS;
+ struct Parameter
{
-
- }
- template<class P>
- Parameter(const P & p)
- : startDirection_(p.startDirection_){
-
- }
- std::string startDirection_;
- };
- MJumpUpDownGen(const GM &gm, const Parameter ¶m)
- : gm_(gm),
- param_(param),
- argBuffer_(gm.numberOfVariables(),0),
- direction_(gm.numberOfVariables()),
- jumpSize_(gm.numberOfVariables(),1)
- {
- this->reset();
- }
- void reset()
- {
- if(param_.startDirection_== std::string("random")){
- for(size_t i=0; i<gm_.numberOfVariables();++i){
- direction_[i]=rand()%2 == 0 ? -1:1;
- }
- }
- else if(param_.startDirection_== std::string("up")){
- for(size_t i=0; i<gm_.numberOfVariables();++i){
- direction_[i]=1;
- }
- }
- else if(param_.startDirection_== std::string("down")){
- for(size_t i=0; i<gm_.numberOfVariables();++i){
- direction_[i]=-1;
- }
+ Parameter(){}
+ };
+ AlphaExpansionGen(const GM &gm, const Parameter ¶m)
+ : gm_(gm),
+ param_(param),
+ currentAlpha_(0)
+ {
+ maxLabel_ =0;
+ for(size_t i=0; i<gm.numberOfVariables();++i){
+ if(gm.numberOfLabels(i)>maxLabel_){
+ maxLabel_ = gm.numberOfLabels(i);
+ }
+ }
}
- else{
- throw opengm::RuntimeError("wrong starting direction for JumpUpDownGen");
+ void reset()
+ {
+ currentAlpha_ = 0;
}
- }
-
- size_t defaultNumStopIt() {return gm_.maxNumberOfLabels();}
-
- void getProposal(const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
- {
- for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi)
+
+ size_t defaultNumStopIt() {return maxLabel_;}
+
+ void getProposal(const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
{
- const size_t numL = gm_.numberOfLabels(vi);
-
- const LabelType ol = argBuffer_[vi];
- const LabelType cl = current[vi];
-
- std::copy(current.begin(), current.end(), argBuffer_.begin());
-
- // flip direction?
- if(ol == cl){
- if(jumpSize_[vi] == 1)
- direction_[vi]*=-1;
- else{
- jumpSize_[vi]/=2;
+ for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi)
+ {
+ if (gm_.numberOfLabels(vi) > currentAlpha_ )
+ {
+ proposal[vi] = currentAlpha_;
+ }
+ else
+ {
+ proposal[vi] = current[vi];
}
}
- else{
- jumpSize_[vi]*=2;
+ ++currentAlpha_;
+ if(currentAlpha_>=maxLabel_){
+ currentAlpha_ = 0;
}
- const LabelType d = direction_[vi];
- const LabelType js = jumpSize_[vi];
+ }
+ LabelType currentAlpha(){return currentAlpha_;}
+ private:
+ const GM &gm_;
+ Parameter param_;
+ LabelType maxLabel_;
+ LabelType currentAlpha_;
+ };
- if(d>=1){
- if(cl+js < gm_.numberOfLabels(vi)){
- proposal[vi] = cl + js;
- }
- else{
- direction_[vi] = -1;
- proposal[vi] = gm_.numberOfLabels(vi)-1;
- jumpSize_[vi] = 1;
- }
- }
- else{
- if(cl>=js){
- proposal[vi] = cl - js;
- }
- else{
- direction_[vi] = 1;
- proposal[vi] = 0 ;
- jumpSize_[vi] = 1;
- }
- }
- }
- }
- private:
- const GM &gm_;
- Parameter param_;
- std::vector<LabelType> argBuffer_;
- std::vector<LabelType> direction_;
- std::vector<LabelType> jumpSize_;
- };
- template<class GM, class ACC>
- class JumpUpDownGen
- {
- public:
++//<<<<<<< HEAD
++//template<class GM, class ACC>
++//class MJumpUpDownGen
++//{
++//public:
++// typedef ACC AccumulationType;
++// typedef GM GraphicalModelType;
++// OPENGM_GM_TYPE_TYPEDEFS;
+
- template<class _GM>
- struct RebindGm{
- typedef JumpUpDownGen<_GM, ACC> type;
- };
++// template<class _GM>
++// struct RebindGm{
++// typedef MJumpUpDownGen<_GM, ACC> type;
++// };
+
- template<class _GM,class _ACC>
- struct RebindGmAndAcc{
- typedef JumpUpDownGen<_GM, _ACC > type;
- };
- typedef ACC AccumulationType;
- typedef GM GraphicalModelType;
- OPENGM_GM_TYPE_TYPEDEFS;
- struct Parameter
- {
- Parameter(
- const std::string startDirection = std::string("up")
- )
- : startDirection_(startDirection)
- {
++// template<class _GM,class _ACC>
++// struct RebindGmAndAcc{
++// typedef MJumpUpDownGen<_GM, _ACC > type;
++// };
+
- }
++// struct Parameter
++//=======
- template<class P>
- Parameter(const P & p)
- : startDirection_(p.startDirection_){
-
- }
- std::string startDirection_;
- };
- JumpUpDownGen(const GM &gm, const Parameter ¶m)
- : gm_(gm),
- param_(param),
- argBuffer_(gm.numberOfVariables(),0),
- direction_(gm.numberOfVariables()),
- jumpSize_(gm.numberOfVariables(),1)
- {
- this->reset();
- }
- void reset()
+ template<class GM, class ACC>
+ class UpDownGen
++//>>>>>>> e3408d084b219dce69a515117c3c1253e3cb5b7d
{
- if(param_.startDirection_== std::string("random")){
- for(size_t i=0; i<gm_.numberOfVariables();++i){
- direction_[i]=rand()%2 == 0 ? -1:1;
- }
- }
- else if(param_.startDirection_== std::string("up")){
- for(size_t i=0; i<gm_.numberOfVariables();++i){
- direction_[i]=1;
- }
- }
- else if(param_.startDirection_== std::string("down")){
- for(size_t i=0; i<gm_.numberOfVariables();++i){
- direction_[i]=-1;
- }
- }
- else{
- throw opengm::RuntimeError("wrong starting direction for JumpUpDownGen");
- }
- }
-
- size_t defaultNumStopIt() {return gm_.maxNumberOfLabels();}
-
- void getProposal(const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
- {
- for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi)
+ public:
+ typedef ACC AccumulationType;
+ typedef GM GraphicalModelType;
+ OPENGM_GM_TYPE_TYPEDEFS;
+ struct Parameter
{
- const size_t numL = gm_.numberOfLabels(vi);
+ Parameter(
+ const std::string startDirection = std::string("up")
+ )
+ : startDirection_(startDirection)
+ {
- const LabelType ol = argBuffer_[vi];
- const LabelType cl = current[vi];
++//<<<<<<< HEAD
++// }
++// template<class P>
++// Parameter(const P & p)
++// : startDirection_(p.startDirection_){
+
- std::copy(current.begin(), current.end(), argBuffer_.begin());
-
- // flip direction?
- if(ol == cl){
- if(jumpSize_[vi] == 1)
- direction_[vi]*=-1;
- else{
- jumpSize_[vi]-=1;
- }
- }
- else{
- jumpSize_[vi]+=1;
++// }
++// std::string startDirection_;
++// };
++// MJumpUpDownGen(const GM &gm, const Parameter ¶m)
++// : gm_(gm),
++// param_(param),
++// argBuffer_(gm.numberOfVariables(),0),
++// direction_(gm.numberOfVariables()),
++// jumpSize_(gm.numberOfVariables(),1)
++// {
++// this->reset();
++// }
++// void reset()
++// {
++// if(param_.startDirection_== std::string("random")){
++// for(size_t i=0; i<gm_.numberOfVariables();++i){
++// direction_[i]=rand()%2 == 0 ? -1:1;
++//=======
++//>>>>>>> e3408d084b219dce69a515117c3c1253e3cb5b7d
}
- const LabelType d = direction_[vi];
- const LabelType js = jumpSize_[vi];
-
- if(d>=1){
-
- if(cl+js < gm_.numberOfLabels(vi)){
- proposal[vi] = cl + js;
- }
- else{
- direction_[vi] = -1;
- proposal[vi] = gm_.numberOfLabels(vi)-1;
- jumpSize_[vi] = 1;
+ std::string startDirection_;
+ };
+ UpDownGen(const GM &gm, const Parameter ¶m)
+ : gm_(gm),
+ param_(param),
+ argBuffer_(gm.numberOfVariables(),0),
+ direction_(gm.numberOfVariables())
+ {
+ this->reset();
+ }
+ void reset()
+ {
+ if(param_.startDirection_== std::string("random")){
+ for(size_t i=0; i<gm_.numberOfVariables();++i){
+ direction_[i]=rand()%2 == 0 ? -1:1;
}
}
- else{
- if(cl>=js){
- proposal[vi] = cl - js;
+ else if(param_.startDirection_== std::string("up")){
+ for(size_t i=0; i<gm_.numberOfVariables();++i){
+ direction_[i]=1;
}
- else{
- direction_[vi] = 1;
- proposal[vi] = 0 ;
- jumpSize_[vi] = 1;
+ }
+ else if(param_.startDirection_== std::string("down")){
+ for(size_t i=0; i<gm_.numberOfVariables();++i){
+ direction_[i]=-1;
}
}
- }
- }
- private:
- const GM &gm_;
- Parameter param_;
- std::vector<LabelType> argBuffer_;
- std::vector<LabelType> direction_;
- std::vector<LabelType> jumpSize_;
- };
-
- template<class GM, class ACC>
- class UpDownGen
- {
- public:
-
- template<class _GM>
- struct RebindGm{
- typedef UpDownGen<_GM, ACC> type;
- };
-
- template<class _GM,class _ACC>
- struct RebindGmAndAcc{
- typedef UpDownGen<_GM, _ACC > type;
- };
-
- typedef ACC AccumulationType;
- typedef GM GraphicalModelType;
- OPENGM_GM_TYPE_TYPEDEFS;
- struct Parameter
- {
- Parameter(
- const std::string startDirection = std::string("up")
- )
- : startDirection_(startDirection)
- {
-
- }
- template<class P>
- Parameter(const P & p)
- : startDirection_(p.startDirection_){
++//<<<<<<< HEAD
++// }
++// }
++//private:
++// const GM &gm_;
++// Parameter param_;
++// std::vector<LabelType> argBuffer_;
++// std::vector<LabelType> direction_;
++// std::vector<LabelType> jumpSize_;
++//};
++
++//template<class GM, class ACC>
++//class JumpUpDownGen
++//{
++//public:
++
++// template<class _GM>
++// struct RebindGm{
++// typedef JumpUpDownGen<_GM, ACC> type;
++// };
++
++// template<class _GM,class _ACC>
++// struct RebindGmAndAcc{
++// typedef JumpUpDownGen<_GM, _ACC > type;
++// };
++// typedef ACC AccumulationType;
++// typedef GM GraphicalModelType;
++// OPENGM_GM_TYPE_TYPEDEFS;
++// struct Parameter
++// {
++// Parameter(
++// const std::string startDirection = std::string("up")
++// )
++// : startDirection_(startDirection)
++// {
++
++// }
++
++// template<class P>
++// Parameter(const P & p)
++// : startDirection_(p.startDirection_){
+
- }
- std::string startDirection_;
- };
- UpDownGen(const GM &gm, const Parameter ¶m)
- : gm_(gm),
- param_(param),
- argBuffer_(gm.numberOfVariables(),0),
- direction_(gm.numberOfVariables())
- {
- this->reset();
- }
- void reset()
- {
- if(param_.startDirection_== std::string("random")){
- for(size_t i=0; i<gm_.numberOfVariables();++i){
- direction_[i]=rand()%2 == 0 ? -1:1;
- }
- }
- else if(param_.startDirection_== std::string("up")){
- for(size_t i=0; i<gm_.numberOfVariables();++i){
- direction_[i]=1;
- }
- }
- else if(param_.startDirection_== std::string("down")){
- for(size_t i=0; i<gm_.numberOfVariables();++i){
- direction_[i]=-1;
++// }
++
++// std::string startDirection_;
++// };
++// JumpUpDownGen(const GM &gm, const Parameter ¶m)
++// : gm_(gm),
++// param_(param),
++// argBuffer_(gm.numberOfVariables(),0),
++// direction_(gm.numberOfVariables()),
++// jumpSize_(gm.numberOfVariables(),1)
++// {
++// this->reset();
++// }
++// void reset()
++// {
++// if(param_.startDirection_== std::string("random")){
++// for(size_t i=0; i<gm_.numberOfVariables();++i){
++// direction_[i]=rand()%2 == 0 ? -1:1;
++// }
++// }
++// else if(param_.startDirection_== std::string("up")){
++// for(size_t i=0; i<gm_.numberOfVariables();++i){
++// direction_[i]=1;
++// }
++// }
++// else if(param_.startDirection_== std::string("down")){
++// for(size_t i=0; i<gm_.numberOfVariables();++i){
++// direction_[i]=-1;
++//=======
+ else{
+ throw opengm::RuntimeError("wrong starting direction for UpDownGen");
++//>>>>>>> e3408d084b219dce69a515117c3c1253e3cb5b7d
}
}
- else{
- throw opengm::RuntimeError("wrong starting direction for UpDownGen");
- }
- }
-
- size_t defaultNumStopIt() {return 2;}
-
- void getProposal(const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
- {
- for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi)
+
+ size_t defaultNumStopIt() {return 2;}
+
+ void getProposal(const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
{
- const size_t numL = gm_.numberOfLabels(vi);
+ for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi)
+ {
+ const size_t numL = gm_.numberOfLabels(vi);
- const LabelType ol = argBuffer_[vi];
- const LabelType cl = current[vi];
-
- std::copy(current.begin(), current.end(), argBuffer_.begin());
+ const LabelType ol = argBuffer_[vi];
+ const LabelType cl = current[vi];
+
+ std::copy(current.begin(), current.end(), argBuffer_.begin());
- // flip direction?
- if(ol == cl){
- direction_[vi]*=-1;
- }
- const LabelType d = direction_[vi];
- if(d==1){
-
- if(cl+1<numL){
- proposal[vi] = cl +1;
- }
- else{
- direction_[vi] = -1;
- proposal[vi] = cl - 1 ;
+ // flip direction?
+ if(ol == cl){
+ direction_[vi]*=-1;
}
- }
- else{
- if(cl>=1){
- proposal[vi] = cl - 1;
+ const LabelType d = direction_[vi];
+ if(d==1){
+
+ if(cl+1<numL){
+ proposal[vi] = cl +1;
+ }
+ else{
+ direction_[vi] = -1;
+ proposal[vi] = cl - 1 ;
+ }
}
else{
- direction_[vi] = 1;
- proposal[vi] = cl + 1 ;
+ if(cl>=1){
+ proposal[vi] = cl - 1;
+ }
+ else{
+ direction_[vi] = 1;
+ proposal[vi] = cl + 1 ;
+ }
}
}
- }
- }
- private:
- const GM &gm_;
- Parameter param_;
- std::vector<LabelType> argBuffer_;
- std::vector<LabelType> direction_;
- std::vector<LabelType> jumpSize_;
- };
-
-
- template<class GM, class ACC>
- class AlphaBetaSwapGen
- {
- public:
-
- template<class _GM>
- struct RebindGm{
- typedef AlphaBetaSwapGen<_GM, ACC> type;
- };
-
- template<class _GM,class _ACC>
- struct RebindGmAndAcc{
- typedef AlphaBetaSwapGen<_GM, _ACC > type;
- };
-
- typedef ACC AccumulationType;
- typedef GM GraphicalModelType;
- OPENGM_GM_TYPE_TYPEDEFS;
- struct Parameter
- {
- Parameter(){}
- template<class P>
- Parameter(const P & p){
-
- }
- };
- private:
- static size_t getMaxLabel(const GM &gm){
- size_t maxLabel = 0;
- for(size_t i=0; i<gm.numberOfVariables();++i){
- if(gm.numberOfLabels(i)>maxLabel ){
- maxLabel = gm.numberOfLabels(i);
- }
- }
- return maxLabel;
- }
- public:
- AlphaBetaSwapGen(const GM &gm, const Parameter ¶m)
- : gm_(gm),
- param_(param),
- maxLabel_(getMaxLabel(gm)),
- abShape_(2, maxLabel_),
- abWalker_(abShape_.begin(), 2)
- {
- // ++abWalker_;
- }
- void reset()
- {
- abWalker_.reset();
- }
-
- size_t defaultNumStopIt() {return (maxLabel_*maxLabel_-maxLabel_)/2;}
-
- void getProposal(const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
- {
- if( maxLabel_<2){
- for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi)
- proposal[vi] = current[vi];
- }else{
- ++abWalker_;
- if(currentAlpha()+1 == maxLabel_ && currentBeta()+1== maxLabel_){
- reset();
- }
- while (abWalker_.coordinateTuple()[0] == abWalker_.coordinateTuple()[1])
- {
- ++abWalker_;
- }
-
- const LabelType alpha = abWalker_.coordinateTuple()[0];
- const LabelType beta = abWalker_.coordinateTuple()[1];
-
- for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi)
- {
- if ( current[vi] == alpha && gm_.numberOfLabels(vi) > beta )
- {
- proposal[vi] = beta;
- }
- else if ( current[vi] == beta && gm_.numberOfLabels(vi) > alpha )
- {
- proposal[vi] = alpha;
- }
- else
- {
- proposal[vi] = current[vi];
- }
- }
- }
- }
-
- LabelType currentAlpha()
- {
- return abWalker_.coordinateTuple()[0];
- }
- LabelType currentBeta()
- {
- return abWalker_.coordinateTuple()[1];
- }
- private:
-
- const GM &gm_;
- Parameter param_;
- LabelType maxLabel_;
- std::vector<LabelType> abShape_;
- ShapeWalker<typename std::vector<LabelType>::const_iterator> abWalker_;
-
- };
-
- template<class GM, class ACC>
- class RandomGen
- {
- public:
- template<class _GM>
- struct RebindGm{
- typedef RandomGen<_GM, ACC> type;
- };
-
- template<class _GM,class _ACC>
- struct RebindGmAndAcc{
- typedef RandomGen<_GM, _ACC > type;
- };
-
- typedef ACC AccumulationType;
- typedef GM GraphicalModelType;
- OPENGM_GM_TYPE_TYPEDEFS;
- struct Parameter
- {
- Parameter(){}
- template<class P>
- Parameter(const P & p){
-
- }
- };
- RandomGen(const GM &gm, const Parameter ¶m)
- : gm_(gm),
- param_(param),
- currentStep_(0)
- {
- }
- void reset()
- {
- currentStep_ = 0;
- }
- size_t defaultNumStopIt() {return 10;}
- void getProposal(const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
+ }
+ private:
+ const GM &gm_;
+ Parameter param_;
+ std::vector<LabelType> argBuffer_;
+ std::vector<LabelType> direction_;
+ std::vector<LabelType> jumpSize_;
+ };
+
++//<<<<<<< HEAD
++//template<class GM, class ACC>
++//class UpDownGen
++//{
++//public:
++
++// template<class _GM>
++// struct RebindGm{
++// typedef UpDownGen<_GM, ACC> type;
++// };
++
++// template<class _GM,class _ACC>
++// struct RebindGmAndAcc{
++// typedef UpDownGen<_GM, _ACC > type;
++// };
++
++// typedef ACC AccumulationType;
++// typedef GM GraphicalModelType;
++// OPENGM_GM_TYPE_TYPEDEFS;
++// struct Parameter
++// {
++// Parameter(
++// const std::string startDirection = std::string("up")
++// )
++// : startDirection_(startDirection)
++// {
++
++// }
++// template<class P>
++// Parameter(const P & p)
++// : startDirection_(p.startDirection_){
++
++// }
++// std::string startDirection_;
++// };
++// UpDownGen(const GM &gm, const Parameter ¶m)
++// : gm_(gm),
++// param_(param),
++// argBuffer_(gm.numberOfVariables(),0),
++// direction_(gm.numberOfVariables())
++// {
++// this->reset();
++// }
++// void reset()
++//=======
+
+ template<class GM, class ACC>
+ class AlphaBetaSwapGen
++//>>>>>>> e3408d084b219dce69a515117c3c1253e3cb5b7d
{
- for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi){
- // draw label
- opengm::RandomUniform<size_t> randomLabel(0, gm_.numberOfLabels(vi),currentStep_+vi);
- proposal[vi] = randomLabel();
+ public:
+ typedef ACC AccumulationType;
+ typedef GM GraphicalModelType;
+ OPENGM_GM_TYPE_TYPEDEFS;
+ struct Parameter
+ {
+ Parameter(){}
+ };
+ private:
+ static size_t getMaxLabel(const GM &gm){
+ size_t maxLabel = 0;
+ for(size_t i=0; i<gm.numberOfVariables();++i){
+ if(gm.numberOfLabels(i)>maxLabel ){
+ maxLabel = gm.numberOfLabels(i);
+ }
+ }
+ return maxLabel;
}
- ++currentStep_;
- }
- private:
- const GM &gm_;
- Parameter param_;
- LabelType currentStep_;
- };
-
-
+ public:
+ AlphaBetaSwapGen(const GM &gm, const Parameter ¶m)
+ : gm_(gm),
+ param_(param),
+ maxLabel_(getMaxLabel(gm)),
+ abShape_(2, maxLabel_),
+ abWalker_(abShape_.begin(), 2)
+ {
+ // ++abWalker_;
+ }
+ void reset()
+ {
+ abWalker_.reset();
+ }
- template<class GM, class ACC>
- class RandomLFGen
- {
- public:
+ size_t defaultNumStopIt() {return (maxLabel_*maxLabel_-maxLabel_)/2;}
- template<class _GM>
- struct RebindGm{
- typedef RandomLFGen<_GM, ACC> type;
- };
+ void getProposal(const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
+ {
+ if( maxLabel_<2){
+ for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi)
+ proposal[vi] = current[vi];
+ }else{
+ ++abWalker_;
+ if(currentAlpha()+1 == maxLabel_ && currentBeta()+1== maxLabel_){
+ reset();
+ }
+ while (abWalker_.coordinateTuple()[0] == abWalker_.coordinateTuple()[1])
+ {
+ ++abWalker_;
+ }
+
+ const LabelType alpha = abWalker_.coordinateTuple()[0];
+ const LabelType beta = abWalker_.coordinateTuple()[1];
+
+ for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi)
+ {
+ if ( current[vi] == alpha && gm_.numberOfLabels(vi) > beta )
+ {
+ proposal[vi] = beta;
+ }
+ else if ( current[vi] == beta && gm_.numberOfLabels(vi) > alpha )
+ {
+ proposal[vi] = alpha;
+ }
+ else
+ {
+ proposal[vi] = current[vi];
+ }
+ }
+ }
+ }
- template<class _GM,class _ACC>
- struct RebindGmAndAcc{
- typedef RandomLFGen<_GM, _ACC > type;
+ LabelType currentAlpha()
+ {
+ return abWalker_.coordinateTuple()[0];
+ }
+ LabelType currentBeta()
+ {
+ return abWalker_.coordinateTuple()[1];
+ }
+ private:
+
++//<<<<<<< HEAD
++//template<class GM, class ACC>
++//class AlphaBetaSwapGen
++//{
++//public:
++
++// template<class _GM>
++// struct RebindGm{
++// typedef AlphaBetaSwapGen<_GM, ACC> type;
++// };
++
++// template<class _GM,class _ACC>
++// struct RebindGmAndAcc{
++// typedef AlphaBetaSwapGen<_GM, _ACC > type;
++// };
++
++// typedef ACC AccumulationType;
++// typedef GM GraphicalModelType;
++// OPENGM_GM_TYPE_TYPEDEFS;
++// struct Parameter
++// {
++// Parameter(){}
++// template<class P>
++// Parameter(const P & p){
++
++// }
++//=======
+ const GM &gm_;
+ Parameter param_;
+ LabelType maxLabel_;
+ std::vector<LabelType> abShape_;
+ ShapeWalker<typename std::vector<LabelType>::const_iterator> abWalker_;
+
++//>>>>>>> e3408d084b219dce69a515117c3c1253e3cb5b7d
};
- typedef ACC AccumulationType;
- typedef GM GraphicalModelType;
- OPENGM_GM_TYPE_TYPEDEFS;
- struct Parameter
+ template<class GM, class ACC>
+ class RandomGen
{
- Parameter(){}
- template<class P>
- Parameter(const P & p){
-
+ public:
+ typedef ACC AccumulationType;
+ typedef GM GraphicalModelType;
+ OPENGM_GM_TYPE_TYPEDEFS;
+ struct Parameter
+ {
+ Parameter(){}
+ };
+ RandomGen(const GM &gm, const Parameter ¶m)
+ : gm_(gm),
+ param_(param),
+ currentStep_(0)
+ {
+ }
+ void reset()
+ {
+ currentStep_ = 0;
+ }
+ size_t defaultNumStopIt() {return 10;}
+ void getProposal(const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
+ {
+ for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi){
+ // draw label
+ opengm::RandomUniform<size_t> randomLabel(0, gm_.numberOfLabels(vi),currentStep_+vi);
+ proposal[vi] = randomLabel();
+ }
+ ++currentStep_;
}
+ private:
+ const GM &gm_;
+ Parameter param_;
+ LabelType currentStep_;
};
- RandomLFGen(const GM &gm, const Parameter ¶m)
- : gm_(gm),
- param_(param),
- currentStep_(0)
- {
- }
- void reset()
- {
- currentStep_ = 0;
- }
- size_t defaultNumStopIt() {return 10;}
- void getProposal(const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
+
+
++//<<<<<<< HEAD
++//template<class GM, class ACC>
++//class RandomGen
++//{
++//public:
++// template<class _GM>
++// struct RebindGm{
++// typedef RandomGen<_GM, ACC> type;
++// };
++
++// template<class _GM,class _ACC>
++// struct RebindGmAndAcc{
++// typedef RandomGen<_GM, _ACC > type;
++// };
++
++// typedef ACC AccumulationType;
++// typedef GM GraphicalModelType;
++// OPENGM_GM_TYPE_TYPEDEFS;
++// struct Parameter
++// {
++// Parameter(){}
++// template<class P>
++// Parameter(const P & p){
++
++// }
++// };
++// RandomGen(const GM &gm, const Parameter ¶m)
++// : gm_(gm),
++// param_(param),
++// currentStep_(0)
++// {
++// }
++// void reset()
++// {
++// currentStep_ = 0;
++// }
++// size_t defaultNumStopIt() {return 10;}
++// void getProposal(const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
++// {
++// for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi){
++// // draw label
++// opengm::RandomUniform<size_t> randomLabel(0, gm_.numberOfLabels(vi),currentStep_+vi);
++// proposal[vi] = randomLabel();
++//=======
+ template<class GM, class ACC>
+ class Random2Gen
{
- for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi){
- // draw label
- opengm::RandomUniform<size_t> randomLabel(0, gm_.numberOfLabels(vi),currentStep_+vi);
- proposal[vi] = randomLabel();
+ public:
+ typedef ACC AccumulationType;
+ typedef GM GraphicalModelType;
+ OPENGM_GM_TYPE_TYPEDEFS;
+ struct Parameter
+ {
+ Parameter(){}
+ };
+ Random2Gen(const GM &gm, const Parameter ¶m)
+ : gm_(gm),
+ param_(param),
+ currentStep_(0)
+ {
++//>>>>>>> e3408d084b219dce69a515117c3c1253e3cb5b7d
}
- typename opengm::LazyFlipper<GM,ACC>::Parameter para(size_t(1));
- opengm::LazyFlipper<GM,ACC> lf(gm_,para);
- lf.setStartingPoint(proposal.begin());
- lf.infer();
- lf.arg(proposal);
- ++currentStep_;
- }
- private:
- const GM &gm_;
- Parameter param_;
- LabelType currentStep_;
- };
+ void reset()
+ {
+ currentStep_ = 0;
+ }
+ size_t defaultNumStopIt() {return 4;}
+ void getProposal(const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
+ {
+ for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi){
+ // draw label
+ opengm::RandomUniform<size_t> randomLabel(0,3,currentStep_+vi);
+ proposal[vi] = std::min(randomLabel(),size_t(1));
+ }
+ ++currentStep_;
+ }
+ private:
+ const GM &gm_;
+ Parameter param_;
+ LabelType currentStep_;
+ };
- template<class GM, class ACC>
- class NonUniformRandomGen
- {
- public:
- template<class _GM>
- struct RebindGm{
- typedef NonUniformRandomGen<_GM, ACC> type;
- };
++//<<<<<<< HEAD
++//template<class GM, class ACC>
++//class RandomLFGen
++//{
++//public:
+
- template<class _GM,class _ACC>
- struct RebindGmAndAcc{
- typedef NonUniformRandomGen<_GM, _ACC > type;
- };
++// template<class _GM>
++// struct RebindGm{
++// typedef RandomLFGen<_GM, ACC> type;
++// };
+
- typedef ACC AccumulationType;
- typedef GM GraphicalModelType;
- OPENGM_GM_TYPE_TYPEDEFS;
- struct Parameter
++// template<class _GM,class _ACC>
++// struct RebindGmAndAcc{
++// typedef RandomLFGen<_GM, _ACC > type;
++// };
++
++// typedef ACC AccumulationType;
++// typedef GM GraphicalModelType;
++// OPENGM_GM_TYPE_TYPEDEFS;
++// struct Parameter
++// {
++// Parameter(){}
++// template<class P>
++// Parameter(const P & p){
++
++// }
++// };
++// RandomLFGen(const GM &gm, const Parameter ¶m)
++// : gm_(gm),
++// param_(param),
++// currentStep_(0)
++// {
++// }
++// void reset()
++//=======
+ template<class GM, class ACC>
+ class RandomLFGen
++//>>>>>>> e3408d084b219dce69a515117c3c1253e3cb5b7d
{
- Parameter(const float temp=1.0)
- : temp_(temp){
+ public:
+ typedef ACC AccumulationType;
+ typedef GM GraphicalModelType;
+ OPENGM_GM_TYPE_TYPEDEFS;
+ struct Parameter
+ {
+ Parameter(){}
+ };
+ RandomLFGen(const GM &gm, const Parameter ¶m)
+ : gm_(gm),
+ param_(param),
+ currentStep_(0)
+ {
}
- template<class P>
- Parameter(const P & p)
- : temp_(p.temp_){
++//<<<<<<< HEAD
++// typename opengm::LazyFlipper<GM,ACC>::Parameter para(size_t(1));
++// opengm::LazyFlipper<GM,ACC> lf(gm_,para);
++// lf.setStartingPoint(proposal.begin());
++// lf.infer();
++// lf.arg(proposal);
++// ++currentStep_;
++// }
++//private:
++// const GM &gm_;
++// Parameter param_;
++// LabelType currentStep_;
++//};
++
++
++//template<class GM, class ACC>
++//class NonUniformRandomGen
++//{
++//public:
++
++// template<class _GM>
++// struct RebindGm{
++// typedef NonUniformRandomGen<_GM, ACC> type;
++// };
++
++// template<class _GM,class _ACC>
++// struct RebindGmAndAcc{
++// typedef NonUniformRandomGen<_GM, _ACC > type;
++// };
++
++// typedef ACC AccumulationType;
++// typedef GM GraphicalModelType;
++// OPENGM_GM_TYPE_TYPEDEFS;
++// struct Parameter
++// {
++// Parameter(const float temp=1.0)
++// : temp_(temp){
++// }
++// template<class P>
++// Parameter(const P & p)
++// : temp_(p.temp_){
+
++// }
++// float temp_;
++//=======
+ void reset()
+ {
+ currentStep_ = 0;
+ }
+ size_t defaultNumStopIt() {return 10;}
+ void getProposal(const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
+ {
+ for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi){
+ // draw label
+ opengm::RandomUniform<size_t> randomLabel(0, gm_.numberOfLabels(vi),currentStep_+vi);
+ proposal[vi] = randomLabel();
+ }
+ typename opengm::LazyFlipper<GM,ACC>::Parameter para(1,proposal.begin(),proposal.end());
+ opengm::LazyFlipper<GM,ACC> lf(gm_,para);
+ lf.infer();
+ lf.arg(proposal);
+ ++currentStep_;
}
- float temp_;
+ private:
+ const GM &gm_;
+ Parameter param_;
+ LabelType currentStep_;
++//>>>>>>> e3408d084b219dce69a515117c3c1253e3cb5b7d
};
- NonUniformRandomGen(const GM &gm, const Parameter ¶m)
- : gm_(gm),
- param_(param),
- currentStep_(0),
- randomGens_(gm.numberOfVariables())
+
+ template<class GM, class ACC>
+ class NonUniformRandomGen
{
- std::vector<bool> hasUnary(gm.numberOfVariables(),false);
+ public:
+ typedef ACC AccumulationType;
+ typedef GM GraphicalModelType;
+ OPENGM_GM_TYPE_TYPEDEFS;
+ struct Parameter
+ {
+ Parameter(const float temp=1.0)
+ : temp_(temp){
+ }
+ float temp_;
+ };
- for(IndexType fi=0; fi<gm_.numberOfFactors(); ++fi){
+ NonUniformRandomGen(const GM &gm, const Parameter ¶m)
+ : gm_(gm),
+ param_(param),
+ currentStep_(0),
+ randomGens_(gm.numberOfVariables())
+ {
+ std::vector<bool> hasUnary(gm.numberOfVariables(),false);
- if(gm_[fi].numberOfVariables()==1){
+ for(IndexType fi=0; fi<gm_.numberOfFactors(); ++fi){
- const IndexType vi = gm_[fi].variableIndex(0);
- const LabelType numLabels = gm_.numberOfLabels(vi);
- std::vector<ValueType> weights(numLabels);
- gm_[fi].copyValues(&weights[0]);
- const ValueType minValue = *std::min_element(weights.begin(),weights.end());
- for(LabelType l=0; l<numLabels; ++l){
- weights[l]-= minValue;
- }
- for(LabelType l=0; l<numLabels; ++l){
- //OPENGM_CHECK_OP(weights[l],>=,0.0, "NonUniformRandomGen allows only positive unaries");
- weights[l]=std::exp(-1.0*param_.temp_*weights[l]);
+ if(gm_[fi].numberOfVariables()==1){
+
+ const IndexType vi = gm_[fi].variableIndex(0);
+ const LabelType numLabels = gm_.numberOfLabels(vi);
+ std::vector<ValueType> weights(numLabels);
+ gm_[fi].copyValues(&weights[0]);
+ const ValueType minValue = *std::min_element(weights.begin(),weights.end());
+ for(LabelType l=0; l<numLabels; ++l){
+ weights[l]-= minValue;
+ }
+ for(LabelType l=0; l<numLabels; ++l){
+ //OPENGM_CHECK_OP(weights[l],>=,0.0, "NonUniformRandomGen allows only positive unaries");
+ weights[l]=std::exp(-1.0*param_.temp_*weights[l]);
+ }
+ randomGens_[vi]=GenType(weights.begin(),weights.end());
+ hasUnary[vi]=true;
}
- randomGens_[vi]=GenType(weights.begin(),weights.end());
- hasUnary[vi]=true;
}
- }
- for(IndexType vi=0 ;vi<gm_.numberOfVariables(); ++vi){
- if(!hasUnary[vi]){
- const LabelType numLabels = gm_.numberOfLabels(vi);
- std::vector<ValueType> weights(numLabels,1.0);
- randomGens_[vi]=GenType(weights.begin(),weights.end());
+ for(IndexType vi=0 ;vi<gm_.numberOfVariables(); ++vi){
+ if(!hasUnary[vi]){
+ const LabelType numLabels = gm_.numberOfLabels(vi);
+ std::vector<ValueType> weights(numLabels,1.0);
+ randomGens_[vi]=GenType(weights.begin(),weights.end());
+ }
}
- }
-
- }
- void reset()
- {
- currentStep_ = 0;
- }
-
- size_t defaultNumStopIt() {
- return 10;
- }
- void getProposal(const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
- {
- for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi){
- proposal[vi]=randomGens_[vi]();
}
- ++currentStep_;
- }
- private:
- const GM &gm_;
- Parameter param_;
- LabelType currentStep_;
-
- typedef RandomDiscreteWeighted<LabelType,ValueType> GenType;
- std::vector < RandomDiscreteWeighted<LabelType,ValueType> > randomGens_;
- };
+ void reset()
+ {
+ currentStep_ = 0;
+ }
+ size_t defaultNumStopIt() {
+ return 10;
+ }
+ void getProposal(const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
+ {
+ for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi){
+ proposal[vi]=randomGens_[vi]();
+ }
+ ++currentStep_;
+ }
+ private:
+ const GM &gm_;
+ Parameter param_;
+ LabelType currentStep_;
- template<class GM, class ACC>
- class BlurGen
- {
- public:
+ typedef RandomDiscreteWeighted<LabelType,ValueType> GenType;
- template<class _GM>
- struct RebindGm{
- typedef BlurGen<_GM, ACC> type;
+ std::vector < RandomDiscreteWeighted<LabelType,ValueType> > randomGens_;
};
- template<class _GM,class _ACC>
- struct RebindGmAndAcc{
- typedef BlurGen<_GM, _ACC > type;
- };
- typedef ACC AccumulationType;
- typedef GM GraphicalModelType;
- OPENGM_GM_TYPE_TYPEDEFS;
- struct Parameter
- {
- Parameter(double sigma = 20.0) : sigma_(sigma)
- {
- }
- template<class P>
- Parameter(const P & p)
- : sigma_(p.sigma_){
++//<<<<<<< HEAD
++//template<class GM, class ACC>
++//class BlurGen
++//{
++//public:
++
++// template<class _GM>
++// struct RebindGm{
++// typedef BlurGen<_GM, ACC> type;
++// };
++
++// template<class _GM,class _ACC>
++// struct RebindGmAndAcc{
++// typedef BlurGen<_GM, _ACC > type;
++// };
++
++// typedef ACC AccumulationType;
++// typedef GM GraphicalModelType;
++// OPENGM_GM_TYPE_TYPEDEFS;
++// struct Parameter
++// {
++// Parameter(double sigma = 20.0) : sigma_(sigma)
++// {
++// }
++// template<class P>
++// Parameter(const P & p)
++// : sigma_(p.sigma_){
+
++// }
++// double sigma_;
++// };
++// BlurGen(const GM &gm, const Parameter ¶m)
++// : gm_(gm),
++// param_(param),
++// currentStep_(0)
++// {
++// const double pi = 3.1416;
++// const double oneOverSqrt2PiSigmaSquared = 1.0 / (std::sqrt(2.0 * pi) * param_.sigma_);
++// const double oneOverTwoSigmaSquared = 1.0 / (2.0* param_.sigma_ * param_.sigma_);
++// const size_t kradius = std::ceil(3*param_.sigma_);
++// kernel_.resize(2*kradius + 1);
++// double sum = 0;
++// for(double i = 0; i <= kradius ; ++i) {
++// double value = oneOverSqrt2PiSigmaSquared * std::exp(-(i*i)*oneOverTwoSigmaSquared);
++// kernel_[kradius+i] = value;
++// kernel_[kradius-i] = value;
++// sum += 2*value;
++// }
++// for(double i = 0; i <= kradius ; ++i) {
++// kernel_[kradius+i] /= sum;
++// kernel_[kradius-i] /= sum;
++// }
++
++// size_t N = gm_.numberOfFactors(0);
++// for(size_t i=1; i<gm_.numberOfVariables(); ++i){
++// if(N==gm_.numberOfFactors(i)){
++// height_ = i+1;
++// break;
++// }
++// }
++//=======
+ template<class GM, class ACC>
+ class BlurGen
+ {
+ public:
+ typedef ACC AccumulationType;
+ typedef GM GraphicalModelType;
+ OPENGM_GM_TYPE_TYPEDEFS;
+ struct Parameter
+ {
+ Parameter(double sigma = 20.0) : sigma_(sigma)
+ {
+ }
+ double sigma_;
+ };
+ BlurGen(const GM &gm, const Parameter ¶m)
+ : gm_(gm),
+ param_(param),
+ currentStep_(0)
+ {
+ const double pi = 3.1416;
+ const double oneOverSqrt2PiSigmaSquared = 1.0 / (std::sqrt(2.0 * pi) * param_.sigma_);
+ const double oneOverTwoSigmaSquared = 1.0 / (2.0* param_.sigma_ * param_.sigma_);
+ const size_t kradius = std::ceil(3*param_.sigma_);
+ kernel_.resize(2*kradius + 1);
+ double sum = 0;
+ for(double i = 0; i <= kradius ; ++i) {
+ double value = oneOverSqrt2PiSigmaSquared * std::exp(-(i*i)*oneOverTwoSigmaSquared);
+ kernel_[kradius+i] = value;
+ kernel_[kradius-i] = value;
+ sum += 2*value;
+ }
+ for(double i = 0; i <= kradius ; ++i) {
+ kernel_[kradius+i] /= sum;
+ kernel_[kradius-i] /= sum;
+ }
+
+ size_t N = gm_.numberOfFactors(0);
+ for(size_t i=1; i<gm_.numberOfVariables(); ++i){
+ if(N==gm_.numberOfFactors(i)){
+ height_ = i+1;
+ break;
+ }
+ }
+
+ width_ = gm_.numberOfVariables()/height_;
+
+ OPENGM_ASSERT(height_*width_ == gm_.numberOfVariables());
+
+ //Generate blured label
+ bluredLabel_.resize(gm_.numberOfVariables(),0);
+ std::vector<double> temp(gm_.numberOfVariables(),0.0);
+ std::vector<LabelType> localLabel(gm_.numberOfVariables(),0);
+ for (size_t i=0; i<gm_.numberOfVariables(); ++i){
+ for(typename GM::ConstFactorIterator it=gm_.factorsOfVariableBegin(i); it!=gm_.factorsOfVariableEnd(i);++it){
+ if(gm_[*it].numberOfVariables() == 1){
+ ValueType v;
+ ACC::neutral(v);
+ for(LabelType l=0; l<gm_.numberOfLabels(i); ++l){
+ if(ACC::bop(gm_[*it](&l),v)){
+ v=gm_[*it](&l);
+ localLabel[i]=l;
+ }
+ }
+ continue;
+ }
+ }
+ }
+ const int radius = (kernel_.size()-1)/2;
+ const int h = height_-1;
+ const int w = width_ -1;
+ for (int i = 0; i < height_; ++i) {
+ for (int j = 0; j < width_; ++j) {
+ double val = 0.0;
+ for (int k = 0; k < 2*radius+1; ++k) {
+ int i2 = std::min( h,std::max(0,i-radius+k));
+ val += kernel_[k] * localLabel[ind(i2,j)];
+ }
+ temp[ind(i,j)] = val;
+ }
+ }
+ for (int i = 0; i < height_; ++i) {
+ for (int j = 0; j < width_; ++j) {
+ double val = 0.0;
+ for (int k = 0; k < 2*radius+1; ++k) {
+ int j2 = std::min(w,std::max(0,i-radius+k));
+ val += kernel_[k] * temp[ind(i, j2)];
+ }
+ bluredLabel_[ind(i,j)] = std::min(double(gm_.numberOfLabels(ind(i,j))),(std::max(0.0,val)));
+ }
+ }
}
- double sigma_;
++//>>>>>>> e3408d084b219dce69a515117c3c1253e3cb5b7d
+
+ void reset(){}
+ size_t defaultNumStopIt() {return 10;}
+
+ void getProposal(const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
+ {
+ if ((currentStep_ % 2) == 0){
+ for (int i = 0; i < height_; ++i) {
+ for (int j = 0; j < width_; ++j) {
+ const size_t var = ind(i,j);
+ opengm::RandomUniform<size_t> randomLabel(0, gm_.numberOfLabels(var),currentStep_+i+j);
+ proposal[var] = (LabelType)(randomLabel());
+ }
+ }
+ }else{
+ proposal.resize(gm_.numberOfVariables(),0.0);
+ opengm::RandomUniform<double> randomLabel(-param_.sigma_*1.5, param_.sigma_*1.5,currentStep_);
+ for(size_t i=0; i<proposal.size();++i){
+ proposal[i] = std::min(gm_.numberOfLabels(i), (LabelType)(std::max(0.0,bluredLabel_[i] + randomLabel())));
+ }
+ }
+ ++currentStep_;
+ }
+ private:
+ size_t ind(int i, int j){ return i+j*height_;}
+ const GM &gm_;
+ Parameter param_;
+ size_t height_;
+ size_t width_;
+ std::vector<double> kernel_;
+ std::vector<double> bluredLabel_;
+ LabelType currentStep_;
};
- BlurGen(const GM &gm, const Parameter ¶m)
- : gm_(gm),
- param_(param),
- currentStep_(0)
- {
- const double pi = 3.1416;
- const double oneOverSqrt2PiSigmaSquared = 1.0 / (std::sqrt(2.0 * pi) * param_.sigma_);
- const double oneOverTwoSigmaSquared = 1.0 / (2.0* param_.sigma_ * param_.sigma_);
- const size_t kradius = std::ceil(3*param_.sigma_);
- kernel_.resize(2*kradius + 1);
- double sum = 0;
- for(double i = 0; i <= kradius ; ++i) {
- double value = oneOverSqrt2PiSigmaSquared * std::exp(-(i*i)*oneOverTwoSigmaSquared);
- kernel_[kradius+i] = value;
- kernel_[kradius-i] = value;
- sum += 2*value;
- }
- for(double i = 0; i <= kradius ; ++i) {
- kernel_[kradius+i] /= sum;
- kernel_[kradius-i] /= sum;
- }
-
- size_t N = gm_.numberOfFactors(0);
- for(size_t i=1; i<gm_.numberOfVariables(); ++i){
- if(N==gm_.numberOfFactors(i)){
- height_ = i+1;
- break;
- }
- }
- width_ = gm_.numberOfVariables()/height_;
- OPENGM_ASSERT(height_*width_ == gm_.numberOfVariables());
-
- //Generate blured label
- bluredLabel_.resize(gm_.numberOfVariables(),0);
- std::vector<double> temp(gm_.numberOfVariables(),0.0);
- std::vector<LabelType> localLabel(gm_.numberOfVariables(),0);
- for (size_t i=0; i<gm_.numberOfVariables(); ++i){
- for(typename GM::ConstFactorIterator it=gm_.factorsOfVariableBegin(i); it!=gm_.factorsOfVariableEnd(i);++it){
- if(gm_[*it].numberOfVariables() == 1){
- ValueType v;
- ACC::neutral(v);
- for(LabelType l=0; l<gm_.numberOfLabels(i); ++l){
- if(ACC::bop(gm_[*it](&l),v)){
- v=gm_[*it](&l);
- localLabel[i]=l;
- }
- }
- continue;
- }
- }
- }
- const int radius = (kernel_.size()-1)/2;
- const int h = height_-1;
- const int w = width_ -1;
- for (int i = 0; i < height_; ++i) {
- for (int j = 0; j < width_; ++j) {
- double val = 0.0;
- for (int k = 0; k < 2*radius+1; ++k) {
- int i2 = std::min( h,std::max(0,i-radius+k));
- val += kernel_[k] * localLabel[ind(i2,j)];
+ template<class GM, class ACC>
+ class EnergyBlurGen
+ {
+ public:
+ typedef ACC AccumulationType;
+ typedef GM GraphicalModelType;
+ OPENGM_GM_TYPE_TYPEDEFS;
+ struct Parameter
+ {
+ Parameter(double sigma = 20.0, bool useLocalMargs = false, double temp=1) : sigma_(sigma), useLocalMargs_(useLocalMargs), temp_(temp)
+ {
}
- temp[ind(i,j)] = val;
- }
- }
- for (int i = 0; i < height_; ++i) {
- for (int j = 0; j < width_; ++j) {
- double val = 0.0;
- for (int k = 0; k < 2*radius+1; ++k) {
- int j2 = std::min(w,std::max(0,i-radius+k));
- val += kernel_[k] * temp[ind(i, j2)];
+ double sigma_;
+ bool useLocalMargs_;
+ double temp_;
+
+ };
+ EnergyBlurGen(const GM &gm, const Parameter ¶m)
+ : gm_(gm),
+ param_(param),
+ currentStep_(0)
+ {
+ const double pi = 3.1416;
+ const double oneOverSqrt2PiSigmaSquared = 1.0 / (std::sqrt(2.0 * pi) * param_.sigma_);
+ const double oneOverTwoSigmaSquared = 1.0 / (2.0* param_.sigma_ * param_.sigma_);
+ const size_t kradius = std::ceil(3*param_.sigma_);
+ std::vector<double> kernel;
+ kernel.resize(2*kradius + 1);
+ double sum = 0;
+ for(double i = 0; i <= kradius ; ++i) {
+ double value = oneOverSqrt2PiSigmaSquared * std::exp(-(i*i)*oneOverTwoSigmaSquared);
+ kernel[kradius+i] = value;
+ kernel[kradius-i] = value;
+ sum += 2*value;
+ }
+ for(double i = 0; i <= kradius ; ++i) {
+ kernel[kradius+i] /= sum;
+ kernel[kradius-i] /= sum;
}
- bluredLabel_[ind(i,j)] = std::min(double(gm_.numberOfLabels(ind(i,j))),(std::max(0.0,val)));
- }
- }
- }
- void reset(){}
- size_t defaultNumStopIt() {return 10;}
-
- void getProposal(const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
- {
- if ((currentStep_ % 2) == 0){
- for (int i = 0; i < height_; ++i) {
- for (int j = 0; j < width_; ++j) {
- const size_t var = ind(i,j);
- opengm::RandomUniform<size_t> randomLabel(0, gm_.numberOfLabels(var),currentStep_+i+j);
- proposal[var] = (LabelType)(randomLabel());
+ size_t N = gm_.numberOfFactors(0);
+ for(size_t i=1; i<gm_.numberOfVariables(); ++i){
+ if(N==gm_.numberOfFactors(i)){
+ height_ = i+1;
+ break;
+ }
}
- }
- }else{
- proposal.resize(gm_.numberOfVariables(),0.0);
- opengm::RandomUniform<double> randomLabel(-param_.sigma_*1.5, param_.sigma_*1.5,currentStep_);
- for(size_t i=0; i<proposal.size();++i){
- proposal[i] = std::min(gm_.numberOfLabels(i), (LabelType)(std::max(0.0,bluredLabel_[i] + randomLabel())));
- }
- }
- ++currentStep_;
- }
- private:
- size_t ind(int i, int j){ return i+j*height_;}
- const GM &gm_;
- Parameter param_;
- size_t height_;
- size_t width_;
- std::vector<double> kernel_;
- std::vector<double> bluredLabel_;
- LabelType currentStep_;
- };
-
-
- template<class GM, class ACC>
- class EnergyBlurGen
- {
- public:
- template<class _GM>
- struct RebindGm{
- typedef EnergyBlurGen<_GM, ACC> type;
- };
-
- template<class _GM,class _ACC>
- struct RebindGmAndAcc{
- typedef EnergyBlurGen<_GM, _ACC > type;
- };
- typedef ACC AccumulationType;
- typedef GM GraphicalModelType;
- OPENGM_GM_TYPE_TYPEDEFS;
- struct Parameter
- {
- Parameter(double sigma = 20.0, bool useLocalMargs = false, double temp=1) : sigma_(sigma), useLocalMargs_(useLocalMargs), temp_(temp)
- {
- }
- double sigma_;
- bool useLocalMargs_;
- double temp_;
+ width_ = gm_.numberOfVariables()/height_;
+
++//<<<<<<< HEAD
++//template<class GM, class ACC>
++//class EnergyBlurGen
++//{
++//public:
++// template<class _GM>
++// struct RebindGm{
++// typedef EnergyBlurGen<_GM, ACC> type;
++// };
++
++// template<class _GM,class _ACC>
++// struct RebindGmAndAcc{
++// typedef EnergyBlurGen<_GM, _ACC > type;
++// };
++
++// typedef ACC AccumulationType;
++// typedef GM GraphicalModelType;
++// OPENGM_GM_TYPE_TYPEDEFS;
++// struct Parameter
++// {
++// Parameter(double sigma = 20.0, bool useLocalMargs = false, double temp=1) : sigma_(sigma), useLocalMargs_(useLocalMargs), temp_(temp)
++// {
++// }
++// double sigma_;
++// bool useLocalMargs_;
++// double temp_;
+
- template<class P>
- Parameter(const P & p)
- : sigma_(p.sigma_),
- useLocalMargs_(p.useLocalMargs_),
- temp_(p.temp_){
++// template<class P>
++// Parameter(const P & p)
++// : sigma_(p.sigma_),
++// useLocalMargs_(p.useLocalMargs_),
++// temp_(p.temp_){
+
- }
-
- };
- EnergyBlurGen(const GM &gm, const Parameter ¶m)
- : gm_(gm),
- param_(param),
- currentStep_(0)
- {
- const double pi = 3.1416;
- const double oneOverSqrt2PiSigmaSquared = 1.0 / (std::sqrt(2.0 * pi) * param_.sigma_);
- const double oneOverTwoSigmaSquared = 1.0 / (2.0* param_.sigma_ * param_.sigma_);
- const size_t kradius = std::ceil(3*param_.sigma_);
- std::vector<double> kernel;
- kernel.resize(2*kradius + 1);
- double sum = 0;
- for(double i = 0; i <= kradius ; ++i) {
- double value = oneOverSqrt2PiSigmaSquared * std::exp(-(i*i)*oneOverTwoSigmaSquared);
- kernel[kradius+i] = value;
- kernel[kradius-i] = value;
- sum += 2*value;
- }
- for(double i = 0; i <= kradius ; ++i) {
- kernel[kradius+i] /= sum;
- kernel[kradius-i] /= sum;
- }
-
- size_t N = gm_.numberOfFactors(0);
- for(size_t i=1; i<gm_.numberOfVariables(); ++i){
- if(N==gm_.numberOfFactors(i)){
- height_ = i+1;
- break;
- }
- }
-
- width_ = gm_.numberOfVariables()/height_;
-
- OPENGM_ASSERT(height_*width_ == gm_.numberOfVariables());
-
- //Generate energy-blured label
- size_t numLabels =gm_.numberOfLabels(0);
- std::vector<double> temp(gm_.numberOfVariables(),0.0);
- std::vector<double> bluredEnergy(gm_.numberOfVariables(),1000000000000.0);
- std::vector<double> bluredOpt(gm_.numberOfVariables(),0);
- std::vector<double> energy(gm_.numberOfVariables(),0.0);
- std::vector<IndexType> unaries(gm_.numberOfVariables());
- std::vector<std::vector<double> > margs;;
- if(param_.useLocalMargs_)
- margs.resize(gm_.numberOfVariables(),std::vector<double>(numLabels));
++// }
++
++// };
++// EnergyBlurGen(const GM &gm, const Parameter ¶m)
++// : gm_(gm),
++// param_(param),
++// currentStep_(0)
++// {
++// const double pi = 3.1416;
++// const double oneOverSqrt2PiSigmaSquared = 1.0 / (std::sqrt(2.0 * pi) * param_.sigma_);
++// const double oneOverTwoSigmaSquared = 1.0 / (2.0* param_.sigma_ * param_.sigma_);
++// const size_t kradius = std::ceil(3*param_.sigma_);
++// std::vector<double> kernel;
++// kernel.resize(2*kradius + 1);
++// double sum = 0;
++// for(double i = 0; i <= kradius ; ++i) {
++// double value = oneOverSqrt2PiSigmaSquared * std::exp(-(i*i)*oneOverTwoSigmaSquared);
++// kernel[kradius+i] = value;
++// kernel[kradius-i] = value;
++// sum += 2*value;
++// }
++// for(double i = 0; i <= kradius ; ++i) {
++// kernel[kradius+i] /= sum;
++// kernel[kradius-i] /= sum;
++// }
++
++// size_t N = gm_.numberOfFactors(0);
++// for(size_t i=1; i<gm_.numberOfVariables(); ++i){
++// if(N==gm_.numberOfFactors(i)){
++// height_ = i+1;
++// break;
++// }
++// }
++
++// width_ = gm_.numberOfVariables()/height_;
++
++// OPENGM_ASSERT(height_*width_ == gm_.numberOfVariables());
++
++// //Generate energy-blured label
++// size_t numLabels =gm_.numberOfLabels(0);
++// std::vector<double> temp(gm_.numberOfVariables(),0.0);
++// std::vector<double> bluredEnergy(gm_.numberOfVariables(),1000000000000.0);
++// std::vector<double> bluredOpt(gm_.numberOfVariables(),0);
++// std::vector<double> energy(gm_.numberOfVariables(),0.0);
++// std::vector<IndexType> unaries(gm_.numberOfVariables());
++// std::vector<std::vector<double> > margs;;
++// if(param_.useLocalMargs_)
++// margs.resize(gm_.numberOfVariables(),std::vector<double>(numLabels));
+
- for (size_t i=0; i<gm_.numberOfVariables(); ++i){
- bool found = false;
- for(typename GM::ConstFactorIterator it=gm_.factorsOfVariableBegin(i); it!=gm_.factorsOfVariableEnd(i);++it){
- if(gm_[*it].numberOfVariables() == 1){
- unaries[i] = *it;
- found = true;
- if(gm_[*it].numberOfLabels(0) != numLabels)
- throw RuntimeError("number of labels are not equal for all variables");
- continue;
- }
- }
- if(!found)
- throw RuntimeError("missing unary");
- }
++// for (size_t i=0; i<gm_.numberOfVariables(); ++i){
++// bool found = false;
++// for(typename GM::ConstFactorIterator it=gm_.factorsOfVariableBegin(i); it!=gm_.factorsOfVariableEnd(i);++it){
++// if(gm_[*it].numberOfVariables() == 1){
++// unaries[i] = *it;
++// found = true;
++// if(gm_[*it].numberOfLabels(0) != numLabels)
++// throw RuntimeError("number of labels are not equal for all variables");
++// continue;
++// }
++// }
++// if(!found)
++// throw RuntimeError("missing unary");
++// }
+
++//=======
+ OPENGM_ASSERT(height_*width_ == gm_.numberOfVariables());
++//>>>>>>> e3408d084b219dce69a515117c3c1253e3cb5b7d
+
+ //Generate energy-blured label
+ size_t numLabels =gm_.numberOfLabels(0);
+ std::vector<double> temp(gm_.numberOfVariables(),0.0);
+ std::vector<double> bluredEnergy(gm_.numberOfVariables(),1000000000000.0);
+ std::vector<double> bluredOpt(gm_.numberOfVariables(),0);
+ std::vector<double> energy(gm_.numberOfVariables(),0.0);
+ std::vector<IndexType> unaries(gm_.numberOfVariables());
+ std::vector<std::vector<double> > margs;;
+ if(param_.useLocalMargs_)
+ margs.resize(gm_.numberOfVariables(),std::vector<double>(numLabels));
+
+ for (size_t i=0; i<gm_.numberOfVariables(); ++i){
+ bool found = false;
+ for(typename GM::ConstFactorIterator it=gm_.factorsOfVariableBegin(i); it!=gm_.factorsOfVariableEnd(i);++it){
+ if(gm_[*it].numberOfVariables() == 1){
+ unaries[i] = *it;
+ found = true;
+ if(gm_[*it].numberOfLabels(0) != numLabels)
+ throw RuntimeError("number of labels are not equal for all variables");
+ continue;
+ }
+ }
+ if(!found)
+ throw RuntimeError("missing unary");
+ }
+
- for(size_t l=0; l<numLabels; ++l){
- for (int i = 0; i < height_; ++i) {
- for (int j = 0; j < width_; ++j) {
- const size_t var = ind(i, j);
- energy[var] =gm_[unaries[ind(i, j)]](&l);
- }
- }
-
- const int radius = (kernel.size()-1)/2;
- const int h = height_-1;
- const int w = width_ -1;
- for (int i = 0; i < height_; ++i) {
- for (int j = 0; j < width_; ++j) {
- double val = 0.0;
- const size_t var = ind(i, j);
- for (int k = 0; k < 2*radius+1; ++k) {
- int i2 = std::min( h,std::max(0,i-radius+k));
- val += kernel[k] * energy[ind(i2,j)];
- }
- temp[var] = val;
- }
- }
- for (int i = 0; i < height_; ++i) {
- for (int j = 0; j < width_; ++j) {
- double val = 0.0;
- const size_t var = ind(i, j);
- for (int k = 0; k < 2*radius+1; ++k) {
- int j2 = std::min(w,std::max(0,i-radius+k));
- val += kernel[k] * temp[ind(i, j2)];
- }
- if(param_.useLocalMargs_){
- margs[var][l]=val;
- }else{
- if(val < bluredEnergy[var]){
- bluredEnergy[var] = val;
- bluredOpt[var] = l;
- }
- }
- }
- }
- }
- if(param_.useLocalMargs_){
- localMargGens_.reserve(bluredOpt.size());
- for(size_t var=0 ; var<bluredOpt.size(); ++var){
- const ValueType minValue = *std::min_element(margs[var].begin(),margs[var].end());
- for(LabelType l=0; l<numLabels; ++l){
- margs[var][l]-= minValue;
- }
- for(LabelType l=0; l<numLabels; ++l){
- margs[var][l]=std::exp(-1.0*param_.temp_*margs[var][l]);
- }
- localMargGens_[var]=opengm::RandomDiscreteWeighted<LabelType,ValueType>(margs[var].begin(),margs[var].end(),var);
- }
- }else{
- uniformGens_.reserve(bluredOpt.size());
- for(size_t var=0 ; var<bluredOpt.size(); ++var){
- LabelType minVal = (LabelType)(std::max((double)(0) , bluredOpt[var]-param_.sigma_*1.5));
- LabelType maxVal = (LabelType)(std::min((double)(numLabels) , bluredOpt[var]+param_.sigma_*1.5));
- uniformGens_[var] = opengm::RandomUniform<LabelType>(minVal, maxVal+1, var);
- }
- }
- }
-
- void reset(){}
- size_t defaultNumStopIt() {return 10;}
-
- void getProposal(const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
- {
- proposal.resize(gm_.numberOfVariables());
- if(param_.useLocalMargs_){
- for(size_t i=0; i<proposal.size();++i){
- proposal[i] = localMargGens_[i]();
- }
- }
- else{
- opengm::RandomUniform<LabelType> randomLabel(0, gm_.numberOfLabels(0),currentStep_);
- if ((currentStep_ % 2) == 0){
- for(size_t i=0; i<proposal.size();++i){
- proposal[i] = randomLabel();
- }
- }else{
- for(size_t i=0; i<proposal.size();++i){
- proposal[i] = uniformGens_[i]();
- }
- }
- }
- ++currentStep_;
- }
- private:
- size_t ind(int i, int j){ return i+j*height_;}
- const GM &gm_;
- Parameter param_;
- size_t height_;
- size_t width_;
- LabelType currentStep_;
-
- // Random Generators
- std::vector<opengm::RandomDiscreteWeighted<LabelType,ValueType> > localMargGens_;
- std::vector<opengm::RandomUniform<LabelType> > uniformGens_;
- };
-
-
- template<class GM, class ACC>
- class DynamincGen{
- public:
-
- template<class _GM>
- struct RebindGm{
- typedef DynamincGen<_GM, ACC> type;
- };
-
- template<class _GM,class _ACC>
- struct RebindGmAndAcc{
- typedef DynamincGen<_GM, _ACC > type;
- };
+ for(size_t l=0; l<numLabels; ++l){
+ for (int i = 0; i < height_; ++i) {
+ for (int j = 0; j < width_; ++j) {
+ const size_t var = ind(i, j);
+ energy[var] =gm_[unaries[ind(i, j)]](&l);
+ }
+ }
+ const int radius = (kernel.size()-1)/2;
+ const int h = height_-1;
+ const int w = width_ -1;
+ for (int i = 0; i < height_; ++i) {
+ for (int j = 0; j < width_; ++j) {
+ double val = 0.0;
+ const size_t var = ind(i, j);
+ for (int k = 0; k < 2*radius+1; ++k) {
+ int i2 = std::min( h,std::max(0,i-radius+k));
+ val += kernel[k] * energy[ind(i2,j)];
+ }
+ temp[var] = val;
+ }
+ }
+ for (int i = 0; i < height_; ++i) {
+ for (int j = 0; j < width_; ++j) {
+ double val = 0.0;
+ const size_t var = ind(i, j);
+ for (int k = 0; k < 2*radius+1; ++k) {
+ int j2 = std::min(w,std::max(0,i-radius+k));
+ val += kernel[k] * temp[ind(i, j2)];
+ }
+ if(param_.useLocalMargs_){
+ margs[var][l]=val;
+ }else{
+ if(val < bluredEnergy[var]){
+ bluredEnergy[var] = val;
+ bluredOpt[var] = l;
+ }
+ }
+ }
+ }
+ }
+ if(param_.useLocalMargs_){
+ localMargGens_.reserve(bluredOpt.size());
+ for(size_t var=0 ; var<bluredOpt.size(); ++var){
+ const ValueType minValue = *std::min_element(margs[var].begin(),margs[var].end());
+ for(LabelType l=0; l<numLabels; ++l){
+ margs[var][l]-= minValue;
+ }
+ for(LabelType l=0; l<numLabels; ++l){
+ margs[var][l]=std::exp(-1.0*param_.temp_*margs[var][l]);
+ }
+ localMargGens_[var]=opengm::RandomDiscreteWeighted<LabelType,ValueType>(margs[var].begin(),margs[var].end(),var);
+ }
+ }else{
+ uniformGens_.reserve(bluredOpt.size());
+ for(size_t var=0 ; var<bluredOpt.size(); ++var){
+ LabelType minVal = (LabelType)(std::max((double)(0) , bluredOpt[var]-param_.sigma_*1.5));
+ LabelType maxVal = (LabelType)(std::min((double)(numLabels) , bluredOpt[var]+param_.sigma_*1.5));
+ uniformGens_[var] = opengm::RandomUniform<LabelType>(minVal, maxVal+1, var);
+ }
+ }
+ }
- typedef ACC AccumulationType;
- typedef GM GraphicalModelType;
- OPENGM_GM_TYPE_TYPEDEFS;
- enum GeneratorType{
- AlphaExpansion,
- AlphaBetaSwap,
- UpDown,
- Random,
- RandomLF,
- NonUniformRandom,
- Blur,
- EnergyBlur
++//<<<<<<< HEAD
++//template<class GM, class ACC>
++//class DynamincGen{
++//public:
++
++// template<class _GM>
++// struct RebindGm{
++// typedef DynamincGen<_GM, ACC> type;
++// };
++
++// template<class _GM,class _ACC>
++// struct RebindGmAndAcc{
++// typedef DynamincGen<_GM, _ACC > type;
++// };
++
++
++// typedef ACC AccumulationType;
++// typedef GM GraphicalModelType;
++// OPENGM_GM_TYPE_TYPEDEFS;
++// enum GeneratorType{
++// AlphaExpansion,
++// AlphaBetaSwap,
++// UpDown,
++// Random,
++// RandomLF,
++// NonUniformRandom,
++// Blur,
++// EnergyBlur
++// };
++
++// struct Parameter{
++// Parameter(){
++
++// }
++// template<class P>
++// Parameter(const P & p)
++// : gen_(p.gen_){
++// }
++
++// GeneratorType gen_;
++// };
++//=======
+ void reset(){}
+ size_t defaultNumStopIt() {return 10;}
+
+ void getProposal(const std::vector<LabelType> &current , std::vector<LabelType> &proposal)
+ {
+ proposal.resize(gm_.numberOfVariables());
+ if(param_.useLocalMargs_){
+ for(size_t i=0; i<proposal.size();++i){
+ proposal[i] = localMargGens_[i]();
+ }
+ }
+ else{
+ opengm::RandomUniform<LabelType> randomLabel(0, gm_.numberOfLabels(0),currentStep_);
+ if ((currentStep_ % 2) == 0){
+ for(size_t i=0; i<proposal.size();++i){
+ proposal[i] = randomLabel();
+ }
+ }else{
+ for(size_t i=0; i<proposal.size();++i){
+ proposal[i] = uniformGens_[i]();
+ }
+ }
+ }
+ ++currentStep_;
+ }
+ private:
+ size_t ind(int i, int j){ return i+j*height_;}
+ const GM &gm_;
+ Parameter param_;
+ size_t height_;
+ size_t width_;
+ LabelType currentStep_;
+
+ // Random Generators
+ std::vector<opengm::RandomDiscreteWeighted<LabelType,ValueType> > localMargGens_;
+ std::vector<opengm::RandomUniform<LabelType> > uniformGens_;
};
- struct Parameter{
- Parameter(){
++//>>>>>>> e3408d084b219dce69a515117c3c1253e3cb5b7d
+ template<class GM, class ACC>
+ class DynamincGen{
+ public:
+ typedef ACC AccumulationType;
+ typedef GM GraphicalModelType;
+ OPENGM_GM_TYPE_TYPEDEFS;
+ enum GeneratorType{
+ AlphaExpansion,
+ AlphaBetaSwap,
+ UpDown,
+ Random,
+ RandomLF,
+ NonUniformRandom,
+ Blur,
+ EnergyBlur
+ };
+
+ struct Parameter{
+ GeneratorType gen_;
+ };
+
+ DynamincGen(const GM & gm, const Parameter & param)
+ :
+ gm_(gm),
+ param_(param){
}
- template<class P>
- Parameter(const P & p)
- : gen_(p.gen_){
- }
-
- GeneratorType gen_;
- };
-
- DynamincGen(const GM & gm, const Parameter & param)
- :
- gm_(gm),
- param_(param){
- }
- void reset(){
- if(param_.gen_ == AlphaExpansion)
- alphaExpansionGen_->reset();
- else if(param_.gen_ == AlphaBetaSwap)
- alphaBetaSwapGen_->reset();
- else if(param_.gen_ == UpDown)
- upDownGen_->reset();
- else if(param_.gen_ == Random)
- randomGen_->reset();
- else if(param_.gen_ == RandomLF)
- randomLFGen_->reset();
- else if(param_.gen_ == NonUniformRandom)
- nonUniformRandomGen_->reset();
- else if(param_.gen_ == Blur)
- blurGen_->reset();
- else if(param_.gen_ == EnergyBlur)
- energyBlurGen_->reset();
- else{
- throw RuntimeError("unknown generator type");
+ void reset(){
+ if(param_.gen_ == AlphaExpansion)
+ alphaExpansionGen_->reset();
+ else if(param_.gen_ == AlphaBetaSwap)
+ alphaBetaSwapGen_->reset();
+ else if(param_.gen_ == UpDown)
+ upDownGen_->reset();
+ else if(param_.gen_ == Random)
+ randomGen_->reset();
+ else if(param_.gen_ == RandomLF)
+ randomLFGen_->reset();
+ else if(param_.gen_ == NonUniformRandom)
+ nonUniformRandomGen_->reset();
+ else if(param_.gen_ == Blur)
+ blurGen_->reset();
+ else if(param_.gen_ == EnergyBlur)
+ energyBlurGen_->reset();
+ else{
+ throw RuntimeError("unknown generator type");
+ }
}
- }
- size_t defaultNumStopIt() {
- if(param_.gen_ == AlphaExpansion)
- return alphaExpansionGen_->defaultNumStopIt();
- else if(param_.gen_ == AlphaBetaSwap)
- return alphaBetaSwapGen_->defaultNumStopIt();
- else if(param_.gen_ == UpDown)
- return upDownGen_->defaultNumStopIt();
- else if(param_.gen_ == Random)
- return randomGen_->defaultNumStopIt();
- else if(param_.gen_ == RandomLF)
- return randomLFGen_->defaultNumStopIt();
- else if(param_.gen_ == NonUniformRandom)
- return nonUniformRandomGen_->defaultNumStopIt();
- else if(param_.gen_ == Blur)
- return blurGen_->defaultNumStopIt();
- else if(param_.gen_ == EnergyBlur)
- return energyBlurGen_->defaultNumStopIt();
- else{
- throw RuntimeError("unknown generator type");
+ size_t defaultNumStopIt() {
+ if(param_.gen_ == AlphaExpansion)
+ return alphaExpansionGen_->defaultNumStopIt();
+ else if(param_.gen_ == AlphaBetaSwap)
+ return alphaBetaSwapGen_->defaultNumStopIt();
+ else if(param_.gen_ == UpDown)
+ return upDownGen_->defaultNumStopIt();
+ else if(param_.gen_ == Random)
+ return randomGen_->defaultNumStopIt();
+ else if(param_.gen_ == RandomLF)
+ return randomLFGen_->defaultNumStopIt();
+ else if(param_.gen_ == NonUniformRandom)
+ return nonUniformRandomGen_->defaultNumStopIt();
+ else if(param_.gen_ == Blur)
+ return blurGen_->defaultNumStopIt();
+ else if(param_.gen_ == EnergyBlur)
+ return energyBlurGen_->defaultNumStopIt();
+ else{
+ throw RuntimeError("unknown generator type");
+ }
}
- }
- void getProposal(const std::vector<LabelType> &current , std::vector<LabelType> &proposal){
- if(param_.gen_ == AlphaExpansion)
- return alphaExpansionGen_->getProposal(current, proposal);
- else if(param_.gen_ == AlphaBetaSwap)
- return alphaBetaSwapGen_->getProposal(current, proposal);
- else if(param_.gen_ == UpDown)
- return upDownGen_->getProposal(current, proposal);
- else if(param_.gen_ == Random)
- return randomGen_->getProposal(current, proposal);
- else if(param_.gen_ == RandomLF)
- return randomLFGen_->getProposal(current, proposal);
- else if(param_.gen_ == NonUniformRandom)
- return nonUniformRandomGen_->getProposal(current, proposal);
- else if(param_.gen_ == Blur)
- return blurGen_->getProposal(current, proposal);
- else if(param_.gen_ == EnergyBlur)
- return energyBlurGen_->getProposal(current, proposal);
- else{
- throw RuntimeError("unknown generator type");
+ void getProposal(const std::vector<LabelType> &current , std::vector<LabelType> &proposal){
+ if(param_.gen_ == AlphaExpansion)
+ return alphaExpansionGen_->getProposal(current, proposal);
+ else if(param_.gen_ == AlphaBetaSwap)
+ return alphaBetaSwapGen_->getProposal(current, proposal);
+ else if(param_.gen_ == UpDown)
+ return upDownGen_->getProposal(current, proposal);
+ else if(param_.gen_ == Random)
+ return randomGen_->getProposal(current, proposal);
+ else if(param_.gen_ == RandomLF)
+ return randomLFGen_->getProposal(current, proposal);
+ else if(param_.gen_ == NonUniformRandom)
+ return nonUniformRandomGen_->getProposal(current, proposal);
+ else if(param_.gen_ == Blur)
+ return blurGen_->getProposal(current, proposal);
+ else if(param_.gen_ == EnergyBlur)
+ return energyBlurGen_->getProposal(current, proposal);
+ else{
+ throw RuntimeError("unknown generator type");
+ }
}
- }
- private:
- const GM & gm_;
- Parameter param_;
-
- // generators
- AlphaExpansionGen<GM, ACC> * alphaExpansionGen_;
- AlphaBetaSwapGen <GM, ACC> * alphaBetaSwapGen_;
- UpDownGen<GM, ACC> * upDownGen_;
- RandomGen<GM, ACC> * randomGen_;
- RandomLFGen<GM, ACC> * randomLFGen_;
- NonUniformRandomGen<GM, ACC> * nonUniformRandomGen_;
- BlurGen<GM, ACC> * blurGen_;
- EnergyBlurGen<GM, ACC> * energyBlurGen_;
- };
-
+ private:
+ const GM & gm_;
+ Parameter param_;
+
+ // generators
+ AlphaExpansionGen<GM, ACC> * alphaExpansionGen_;
+ AlphaBetaSwapGen <GM, ACC> * alphaBetaSwapGen_;
+ UpDownGen<GM, ACC> * upDownGen_;
+ RandomGen<GM, ACC> * randomGen_;
+ RandomLFGen<GM, ACC> * randomLFGen_;
+ NonUniformRandomGen<GM, ACC> * nonUniformRandomGen_;
+ BlurGen<GM, ACC> * blurGen_;
+ EnergyBlurGen<GM, ACC> * energyBlurGen_;
+ };
}
diff --cc include/opengm/inference/lpcplex.hxx
index 0dde612,28fafcf..6508354
--- a/include/opengm/inference/lpcplex.hxx
+++ b/include/opengm/inference/lpcplex.hxx
@@@ -558,12 -508,12 +558,13 @@@ LPCplex<GM, ACC>::infe
//cplex_.setParam(IloCplex::MIRCuts, parameter_.MIRCutLevel_);
// solve problem
+
if(!cplex_.solve()) {
std::cout << "failed to optimize. " <<cplex_.getStatus() << std::endl;
+ inferenceStarted_ = 0;
return UNKNOWN;
}
- cplex_.getValues(sol_, x_);
+ cplex_.getValues(sol_, x_);
}
catch(IloCplex::Exception e) {
std::cout << "caught CPLEX exception: " << e << std::endl;
diff --cc include/opengm/inference/multicut.hxx
index 0a764dc,7933d17..e7fcb89
--- a/include/opengm/inference/multicut.hxx
+++ b/include/opengm/inference/multicut.hxx
@@@ -125,8 -118,8 +126,9 @@@ public
bool useOldPriorityQueue_;
bool useChordalSearch_;
bool useBufferedStates_;
+ bool initializeWith3Cycles_;
+
/// \param numThreads number of threads that should be used (default = 0 [automatic])
/// \param cutUp value which the optima at least has (helps to cut search-tree)
Parameter
@@@ -134,18 -127,10 +136,34 @@@
int numThreads=0,
double cutUp=1.0e+75
)
- : numThreads_(numThreads),
- : numThreads_(numThreads), verbose_(false),verboseCPLEX_(false), cutUp_(cutUp),
- timeOut_(36000000), maximalNumberOfConstraintsPerRound_(1000000),
- edgeRoundingValue_(0.00000001),MWCRounding_(NEAREST), reductionMode_(3),useOldPriorityQueue_(false), useChordalSearch_(false), useBufferedStates_(false),
++//<<<<<<< HEAD
++// : numThreads_(numThreads),
++// verbose_(false),
++// verboseCPLEX_(false),
++// cutUp_(cutUp),
++// timeOut_(36000000),
++// maximalNumberOfConstraintsPerRound_(1000000),
++// edgeRoundingValue_(0.00000001),
++// MWCRounding_(NEAREST),
++// reductionMode_(3),
++// useOldPriorityQueue_(false),
++// useChordalSearch_(false),
++// useBufferedStates_(false)
++//=======
++ : numThreads_(numThreads),
+ verbose_(false),
+ verboseCPLEX_(false),
+ cutUp_(cutUp),
+ timeOut_(36000000),
+ maximalNumberOfConstraintsPerRound_(1000000),
+ edgeRoundingValue_(0.00000001),
- MWCRounding_(NEAREST),
++ MWCRounding_(NEAREST),
+ reductionMode_(3),
- useOldPriorityQueue_(false),
- useChordalSearch_(false),
- useBufferedStates_(false)
++ useOldPriorityQueue_(false),
++ useChordalSearch_(false),
++ useBufferedStates_(false),
+ initializeWith3Cycles_(false)
++//>>>>>>> e3408d084b219dce69a515117c3c1253e3cb5b7d
{};
template<class OTHER_PARAM>
@@@ -153,18 -138,11 +171,33 @@@
(
const OTHER_PARAM & p
)
- : numThreads_(p.numThreads_),
- : numThreads_(p.numThreads_), verbose_(p.verbose_),verboseCPLEX_(p.verboseCPLEX_), cutUp_(p.cutUp_),
- timeOut_(p.timeOut_), maximalNumberOfConstraintsPerRound_(p.maximalNumberOfConstraintsPerRound_),
- edgeRoundingValue_(p.edgeRoundingValue_),MWCRounding_(p.MWCRounding_), reductionMode_(p.reductionMode_),
- useOldPriorityQueue_(p.useOldPriorityQueue_), useChordalSearch_(p.useChordalSearch_),
++//<<<<<<< HEAD
++// : numThreads_(p.numThreads_),
++// verbose_(p.verbose_),
++// verboseCPLEX_(p.verboseCPLEX_),
++// cutUp_(p.cutUp_),
++// timeOut_(p.timeOut_),
++// maximalNumberOfConstraintsPerRound_(p.maximalNumberOfConstraintsPerRound_),
++// edgeRoundingValue_(p.edgeRoundingValue_),
++// MWCRounding_(static_cast<MWCRounding>(p.MWCRounding_)),
++// reductionMode_(p.reductionMode_),
++// allowCutsWithin_(p.allowCutsWithin_),
++// useOldPriorityQueue_(p.useOldPriorityQueue_),
++// useChordalSearch_(p.useChordalSearch_)
++//=======
++ : numThreads_(p.numThreads_),
+ verbose_(p.verbose_),
- verboseCPLEX_(p.verboseCPLEX_),
++ verboseCPLEX_(p.verboseCPLEX_),
+ cutUp_(p.cutUp_),
- timeOut_(p.timeOut_),
++ timeOut_(p.timeOut_),
+ maximalNumberOfConstraintsPerRound_(p.maximalNumberOfConstraintsPerRound_),
+ edgeRoundingValue_(p.edgeRoundingValue_),
- MWCRounding_(static_cast<MWCRounding>(p.MWCRounding_)),
- reductionMode_(p.reductionMode_),
- allowCutsWithin_(p.allowCutsWithin_),
- useOldPriorityQueue_(p.useOldPriorityQueue_),
- useChordalSearch_(p.useChordalSearch_)
++ MWCRounding_(p.MWCRounding_),
++ reductionMode_(p.reductionMode_),
++ useOldPriorityQueue_(p.useOldPriorityQueue_),
++ useChordalSearch_(p.useChordalSearch_),
+ initializeWith3Cycles_(false)
++//>>>>>>> e3408d084b219dce69a515117c3c1253e3cb5b7d
{};
};
diff --cc src/unittest/CMakeLists.txt
index 1ba6251,8110b7a..bbf3411
--- a/src/unittest/CMakeLists.txt
+++ b/src/unittest/CMakeLists.txt
@@@ -115,6 -115,8 +118,9 @@@ if(BUILD_TESTING
add_executable(test-lp-functiontransfer test_lp_functiontransfer.cxx ${headers})
add_test(test-lp-functiontransfer ${CMAKE_CURRENT_BINARY_DIR}/test-lp-functiontransfer)
+ add_executable(test-canonicalview test_canonicalview.cxx ${headers})
+ add_test(test-canonicalview ${CMAKE_CURRENT_BINARY_DIR}/test-canonicalview)
+
add_subdirectory(inference)
+ add_subdirectory(learning)
endif()
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git
More information about the debian-science-commits
mailing list