[opengm] 133/386: Update real world example, add Cplex to available inference methods for solvers.
Ghislain Vaillant
ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:36:08 UTC 2016
This is an automated email from the git hooks/post-receive script.
ghisvail-guest pushed a commit to branch debian/master
in repository opengm.
commit c26a00418fe34ef9f00291c180fe613171d2599a
Author: Carsten Haubold <carstenhaubold at googlemail.com>
Date: Thu Dec 18 17:04:03 2014 +0100
Update real world example, add Cplex to available inference methods for solvers.
---
fubar/real_example.py | 41 ++++++++++++-----------
src/interfaces/python/opengm/learning/__init__.py | 4 +--
src/interfaces/python/opengm/learning/helper.hxx | 11 ++++++
3 files changed, 34 insertions(+), 22 deletions(-)
diff --git a/fubar/real_example.py b/fubar/real_example.py
index bf3d9c3..fdb7687 100644
--- a/fubar/real_example.py
+++ b/fubar/real_example.py
@@ -3,11 +3,11 @@ import opengm.learning as learning
from opengm import numpy
import vigra
-nModels = 4
+nModels = 1
nLables = 2
shape = [10, 10]
numVar = shape[0]*shape[1]
-nWeights = 12
+nWeights = 9
def makeGt(shape):
gt=numpy.ones(shape,dtype='uint8')
@@ -19,27 +19,26 @@ def makeGt(shape):
weightVals = numpy.ones(nWeights)
weights = opengm.learning.Weights(weightVals)
-uWeightIds = numpy.arange(8,dtype='uint64').reshape(2,4)
+uWeightIds = numpy.arange(6,dtype='uint64').reshape(2,3)
print uWeightIds
-bWeightIds = numpy.array([8,9,10,11],dtype='uint64')
-
+bWeightIds = numpy.array([6,7,8],dtype='uint64')
dataset = learning.createDataset(loss='h')
-
+dataset.setWeights(weights)
def makeFeatures(gt):
random = numpy.random.rand(*gt.shape)-0.5
randGt = random + gt
feat = []
- for sigma in [1.0, 1.5, 2.0]:
+ for sigma in [1.0, 2.0]:
feat.append(vigra.filters.gaussianSmoothing(randGt.astype('float32'),sigma) )
featB = []
- for sigma in [1.0, 1.5, 2.0]:
+ for sigma in [1.0, 2.0]:
featB.append(vigra.filters.gaussianGradientMagnitude(randGt.astype('float32'),sigma) )
@@ -57,13 +56,17 @@ for mi in range(nModels):
unaries,binaries = makeFeatures(gt)
- print unaries, binaries
+ # print unaries, binaries
for x in range(shape[0]):
for y in range(shape[1]):
uFeat = numpy.append(unaries[x,y,:], [1]).astype(opengm.value_type)
uFeat = numpy.repeat(uFeat[:,numpy.newaxis],2,axis=1).T
+ print(unaries[x,y,:])
+ print(unaries.shape)
+ print(uFeat)
+ print(uFeat.shape)
lu = opengm.LUnaryFunction(weights=weights,numberOfLabels=nLables, features=uFeat, weightIds=uWeightIds)
@@ -71,8 +74,6 @@ for mi in range(nModels):
fid= gm.addFunction(lu)
gm.addFactor(fid, y+x*shape[1])
-
-
for x in range(shape[0]):
for y in range(shape[1]):
@@ -87,19 +88,19 @@ for mi in range(nModels):
fid= gm.addFunction(pf)
gm.addFactor(fid, [y+x*shape[1], y+1+x*shape[1]])
-
-
dataset.pushBackInstance(gm,gtFlat.astype(opengm.label_type))
-
# for grid search learner
-lowerBounds = numpy.ones(nWeights)*-2.0
-upperBounds = numpy.ones(nWeights)*2.0
-nTestPoints =numpy.ones(nWeights).astype('uint64')*10
+lowerBounds = numpy.ones(nWeights)*-1.0
+upperBounds = numpy.ones(nWeights)*1.0
+nTestPoints =numpy.ones(nWeights).astype('uint64')*5
+# learner = learning.gridSearchLearner(dataset=dataset,lowerBounds=lowerBounds, upperBounds=upperBounds,nTestPoints=nTestPoints)
+learner = learning.structMaxMarginLearner(dataset, 1.0, 0.001, 0)
-learner = learning.gridSearchLearner(dataset=dataset,lowerBounds=lowerBounds, upperBounds=upperBounds,nTestPoints=nTestPoints)
+learner.learn(infCls=opengm.inference.Icm,
+ parameter=opengm.InfParam())
-learner.learn(infCls=opengm.inference.BeliefPropagation,
- parameter=opengm.InfParam(damping=0.5))
+for w in range(nWeights):
+ print weights[w]
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index 6e0c53f..b1293b0 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -54,8 +54,8 @@ def gridSearchLearner(dataset, lowerBounds, upperBounds, nTestPoints):
if struct.calcsize("P") * 8 == 32:
sizeT_type = 'uint32'
- param = learnerParamCls(nr(lowerBounds,dtype='float64'), nr(lowerBounds,dtype='float64'),
- nr(lowerBounds,dtype=sizeT_type))
+ param = learnerParamCls(nr(lowerBounds,dtype='float64'), nr(upperBounds,dtype='float64'),
+ nr(nTestPoints,dtype=sizeT_type))
learner = learnerCls(dataset, param)
return learner
diff --git a/src/interfaces/python/opengm/learning/helper.hxx b/src/interfaces/python/opengm/learning/helper.hxx
index 87bc676..08832fd 100644
--- a/src/interfaces/python/opengm/learning/helper.hxx
+++ b/src/interfaces/python/opengm/learning/helper.hxx
@@ -11,6 +11,10 @@
#include <opengm/learning/gridsearch-learning.hxx>
#include <opengm/inference/messagepassing/messagepassing.hxx>
+#ifdef WITH_CPLEX
+#include <opengm/inference/lpcplex.hxx>
+#endif
+
namespace opengm{
template<class LEARNER>
@@ -40,9 +44,16 @@ public:
typedef opengm::BeliefPropagationUpdateRules<GMType, ACC> UpdateRulesType;
typedef opengm::MessagePassing<GMType, ACC, UpdateRulesType, opengm::MaxDistance> BpInf;
+#ifdef WITH_CPLEX
+ typedef opengm::LPCplex<GMType, ACC> Cplex;
+#endif
+
c
.def("_learn",&pyLearnWithInf<IcmInf>)
.def("_learn",&pyLearnWithInf<BpInf>)
+#ifdef WITH_CPLEX
+ .def("_learn",&pyLearnWithInf<Cplex>)
+#endif
;
}
};
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git
More information about the debian-science-commits mailing list is available on its list information page.