[opengm] 202/386: new dataset, way
Ghislain Vaillant
ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:37:41 UTC 2016
This is an automated email from the git hooks/post-receive script.
ghisvail-guest pushed a commit to branch debian/master
in repository opengm.
commit de44f28f8975e1f4188f120a9fe2bde0e402c545
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date: Tue Jan 6 17:27:24 2015 +0100
new dataset, way
---
fubar/brown_horse.py | 125 ++++++++++++++++++++++++++
fubar/make_grid_potts_dset.py | 116 ++++++++++++++++++++++++
include/opengm/learning/struct-max-margin.hxx | 2 +-
include/opengm/learning/subgradient_ssvm.hxx | 2 +-
4 files changed, 243 insertions(+), 2 deletions(-)
diff --git a/fubar/brown_horse.py b/fubar/brown_horse.py
new file mode 100644
index 0000000..60dee6d
--- /dev/null
+++ b/fubar/brown_horse.py
@@ -0,0 +1,125 @@
+import numpy
+import opengm
+from opengm import learning
+import vigra
+from progressbar import *
+import glob
+import os
+from functools import partial
+from make_grid_potts_dset import secondOrderImageDataset, getPbar
+
+
+
def posiFeatures(img):
    """Per-pixel positional features for *img* (first two axes are H, W).

    Returns an (H, W, 6) float array whose channels are, in order:
    exp(1 - r), r, x**2, y**2, x, y — where x and y are image coordinates
    normalized to [-0.5, 0.5] and r = sqrt(x**2 + y**2).
    """
    nRows, nCols = img.shape[0], img.shape[1]
    # centered coordinate axes in [-0.5, 0.5]
    colCoords = numpy.linspace(0, 1, nCols) - 0.5
    rowCoords = numpy.linspace(0, 1, nRows) - 0.5
    xv, yv = numpy.meshgrid(colCoords, rowCoords)

    radius = numpy.sqrt(xv**2 + yv**2)[:, :, None]
    channels = [
        numpy.exp(1.0 - radius),   # peaks at the image center
        radius,
        (xv**2)[:, :, None],
        (yv**2)[:, :, None],
        xv[:, :, None],
        yv[:, :, None],
    ]
    feats = numpy.concatenate(channels, axis=2)
    assert feats.shape[0:2] == img.shape[0:2]
    return feats
+
+#i = numpy.ones([7, 5])
+#
+#print posiFeatures(i).shape
+#
# where is the dataset stored (hard-coded local path to the Weizmann horse db)
dsetRoot = '/home/tbeier/datasets/weizmann_horse_db/'
imgPath = dsetRoot + 'brown_horse/'      # RGB input images
gtBasePath = dsetRoot + 'figure_ground/' # ground-truth masks

imgFiles = glob.glob(imgPath+'*.jpg')
# subsampling stride for the ground truth (1 = keep full resolution)
takeNth = 1
imgs = []
gts = []
pbar = getPbar(len(imgFiles), 'Load Image')
pbar.start()
for i,path in enumerate(imgFiles):
    # ground-truth file shares the image's basename in the figure_ground dir
    gtPath = gtBasePath + os.path.basename(path)
    rgbImg = vigra.impex.readImage(path)
    gtImg = vigra.impex.readImage(gtPath).astype('uint32')[::takeNth,::takeNth]
    # binarize the mask: below 125 -> label 0, else label 1
    gtImg[gtImg<125] = 0
    gtImg[gtImg>=125] = 1
    # resize the RGB image to match the (possibly subsampled) ground truth
    rgbImg = vigra.resize(rgbImg, [gtImg.shape[0],gtImg.shape[1]])
    imgs.append(rgbImg)
    gts.append(gtImg)
    pbar.update(i)
pbar.finish()
+
def getSelf(img):
    """Identity feature function: return *img* unchanged."""
    return img
+
+
def labHessianOfGaussian(img, sigma):
    """Hessian-of-Gaussian eigenvalues on the lightness (L) channel.

    Converts the RGB *img* to Lab, keeps only channel 0 (L), and returns
    the eigenvalue images at scale *sigma*.
    """
    lightness = vigra.colors.transform_RGB2Lab(img)[:, :, 0]
    lightness = vigra.taggedView(lightness, 'xy')
    return vigra.filters.hessianOfGaussianEigenvalues(lightness, sigma)
+
def labStructTensorEv(img, sigma):
    """Structure-tensor eigenvalues on the lightness (L) channel.

    Converts the RGB *img* to Lab, keeps only channel 0 (L), and returns
    the eigenvalue images with inner scale *sigma* and outer scale 2*sigma.
    """
    lightness = vigra.colors.transform_RGB2Lab(img)[:, :, 0]
    lightness = vigra.taggedView(lightness, 'xy')
    return vigra.filters.structureTensorEigenvalues(lightness, sigma, 2*sigma)
+
# Unary feature functions: each maps an image to an (H, W, C) feature stack.
fUnary = [
    posiFeatures,
    getSelf,
    vigra.colors.transform_RGB2XYZ,
    vigra.colors.transform_RGB2Lab,
    vigra.colors.transform_RGB2Luv,
    partial(labHessianOfGaussian, sigma=1.0),
    partial(labHessianOfGaussian, sigma=2.0),
    partial(vigra.filters.gaussianGradientMagnitude, sigma=1.0),
    partial(vigra.filters.gaussianGradientMagnitude, sigma=2.0),
]

# Pairwise feature functions: same signature; evaluated per pixel and used
# for the second-order (edge) factors.
fBinary = [
    posiFeatures,
    vigra.colors.transform_RGB2XYZ,
    vigra.colors.transform_RGB2Lab,
    vigra.colors.transform_RGB2Luv,
    partial(labHessianOfGaussian, sigma=1.0),
    partial(labHessianOfGaussian, sigma=2.0),
    partial(labStructTensorEv, sigma=1.0),
    partial(labStructTensorEv, sigma=2.0),
    partial(vigra.filters.gaussianGradientMagnitude, sigma=1.0),
    partial(vigra.filters.gaussianGradientMagnitude, sigma=2.0),
]
+
+
# Build the training dataset and the held-out test models (split is done
# inside the helper; default trainFraction=0.75).
dataset,test_set = secondOrderImageDataset(imgs=imgs, gts=gts, numberOfLabels=2,
                                           fUnary=fUnary, fBinary=fBinary,
                                           addConstFeature=False)




# Subgradient structured SVM learner; batch mode, at most 500 iterations.
learner = learning.subgradientSSVM(dataset, learningRate=0.3, C=100,
                                   learningMode='batch',maxIterations=500)

#learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)


# Learn the weights, using external QPBO as the inference engine.
learner.learn(infCls=opengm.inference.QpboExternal,
              parameter=opengm.InfParam())



# predict on test set
for (rgbImg, gtImg, gm) in test_set :
    # infer for test image
    inf = opengm.inference.QpboExternal(gm)
    inf.infer()
    arg = inf.arg()
    # back to 2D image layout; NOTE(review): numpy.squeeze on a shape tuple
    # just converts it to an array — presumably gtImg is always 2D; confirm.
    arg = arg.reshape( numpy.squeeze(gtImg.shape))

    # show the segmentation overlay (labels shifted by +2 for display)
    vigra.segShow(rgbImg, arg+2)
    vigra.show()
+
diff --git a/fubar/make_grid_potts_dset.py b/fubar/make_grid_potts_dset.py
new file mode 100644
index 0000000..cda360b
--- /dev/null
+++ b/fubar/make_grid_potts_dset.py
@@ -0,0 +1,116 @@
+import numpy
+import opengm
+from opengm import learning
+import vigra
+from progressbar import *
+from functools import partial
+
+
+
def getPbar(size, name):
    """Return an (unstarted) ProgressBar labelled *name* over *size* steps.

    The caller is expected to call ``.start()`` / ``.update(i)`` /
    ``.finish()`` on the returned bar.
    """
    bar = Bar(marker='0',left='[',right=']')
    # see the progressbar docs for other widget options
    widgets = ['%s: '%name, Percentage(), ' ', bar, ' ', ETA(), ' ', FileTransferSpeed()]
    return ProgressBar(widgets=widgets, maxval=size)
+
+def secondOrderImageDataset(imgs, gts, numberOfLabels, fUnary, fBinary, addConstFeature, trainFraction=0.75):
+ assert numberOfLabels == 2
+
+ # train test
+ nImg = len(imgs)
+ nTrain = int(float(nImg)*trainFraction+0.5)
+ nTest = (nImg-nTrain)
+
+
+
+ # compute features for a single image
+ tImg = imgs[0]
+ unaryFeat = [f(tImg) for f in fUnary]
+ unaryFeat = numpy.nan_to_num(numpy.concatenate(unaryFeat,axis=2).view(numpy.ndarray))
+ nUnaryFeat = unaryFeat.shape[-1] + int(addConstFeature)
+
+ binaryFeat = [f(tImg) for f in fBinary]
+ binaryFeat = numpy.nan_to_num(numpy.concatenate(binaryFeat,axis=2).view(numpy.ndarray))
+ nBinaryFeat = binaryFeat.shape[-1] + int(addConstFeature)
+ nWeights = nUnaryFeat + nBinaryFeat
+ print "------------------------------------------------"
+ print "nTrain",nTrain,"nTest",nTest
+ print "nWeights",nWeights,"(",nUnaryFeat,nBinaryFeat,")"
+ print "------------------------------------------------"
+
+ train_set = []
+ tentative_test_set = []
+
+ for i,(img,gt) in enumerate(zip(imgs,gts)):
+ if(i<nTrain):
+ train_set.append((img,gt))
+ else:
+ tentative_test_set.append((img,gt))
+
+
+ dataset = learning.createDataset(numWeights=nWeights, loss='h')
+ weights = dataset.getWeights()
+ uWeightIds = numpy.arange(nUnaryFeat ,dtype='uint64')
+ bWeightIds = numpy.arange(start=nUnaryFeat,stop=nWeights,dtype='uint64')
+
+ def makeModel(img,gt):
+ shape = gt.shape[0:2]
+ numVar = shape[0] * shape[1]
+
+ # make model
+ gm = opengm.gm(numpy.ones(numVar)*2)
+
+ # compute features
+ unaryFeat = [f(img) for f in fUnary]
+ unaryFeat = numpy.nan_to_num(numpy.concatenate(unaryFeat,axis=2).view(numpy.ndarray))
+ unaryFeat = unaryFeat.reshape([numVar,-1])
+ binaryFeat = [f(img) for f in fBinary]
+ binaryFeat = numpy.nan_to_num(numpy.concatenate(binaryFeat,axis=2).view(numpy.ndarray))
+ binaryFeat = binaryFeat.reshape([numVar,-1])
+
+
+
+ # add unaries
+ lUnaries = learning.lUnaryFunctions(weights =weights,numberOfLabels = numberOfLabels,
+ features=unaryFeat, weightIds = uWeightIds.reshape([1,-1]).copy(),
+ featurePolicy= learning.FeaturePolicy.sharedBetweenLabels,
+ makeFirstEntryConst=numberOfLabels==2, addConstFeature=addConstFeature)
+ fids = gm.addFunctions(lUnaries)
+ gm.addFactors(fids, numpy.arange(numVar))
+
+ # add second order
+ vis2Order=opengm.gridVis(shape[0:2],True)
+
+ fU = binaryFeat[vis2Order[:,0],:]
+ fV = binaryFeat[vis2Order[:,1],:]
+ fB = (fU + fV / 2.0)
+ lp = learning.lPottsFunctions(weights=weights, numberOfLabels=numberOfLabels,
+ features=fB, weightIds=bWeightIds,
+ addConstFeature=addConstFeature)
+ gm.addFactors(gm.addFunctions(lp), vis2Order)
+
+ return gm
+
+ # make training models
+ pbar = getPbar(nTrain,"Training Models")
+ pbar.start()
+ for i,(img,gt) in enumerate(train_set):
+ gm = makeModel(img, gt)
+ dataset.pushBackInstance(gm,gt.reshape(-1).astype(opengm.label_type))
+ pbar.update(i)
+ pbar.finish()
+
+
+ # make test models
+ test_set = []
+ pbar = getPbar(nTest,"Test Models")
+ pbar.start()
+ for i,(img,gt) in enumerate(tentative_test_set):
+ gm = makeModel(img, gt)
+ test_set.append((img, gt, gm))
+ pbar.update(i)
+ pbar.finish()
+
+ return dataset, test_set
+
+
diff --git a/include/opengm/learning/struct-max-margin.hxx b/include/opengm/learning/struct-max-margin.hxx
index b670866..2974c57 100644
--- a/include/opengm/learning/struct-max-margin.hxx
+++ b/include/opengm/learning/struct-max-margin.hxx
@@ -103,7 +103,7 @@ private:
for (int i = 0; i < _dataset.getNumberOfModels(); i++) {
// get E(x,y) and F(x,y)
- std::cout << "locking model " << i << " of " << _dataset.getNumberOfModels() << std::endl;
+ //std::cout << "locking model " << i << " of " << _dataset.getNumberOfModels() << std::endl;
_dataset.lockModel(i);
const typename DatasetType::GMType& gm = _dataset.getModel(i);
const typename DatasetType::GMWITHLOSS& gml = _dataset.getModelWithLoss(i);
diff --git a/include/opengm/learning/subgradient_ssvm.hxx b/include/opengm/learning/subgradient_ssvm.hxx
index 1427621..fd1a3fb 100644
--- a/include/opengm/learning/subgradient_ssvm.hxx
+++ b/include/opengm/learning/subgradient_ssvm.hxx
@@ -212,7 +212,7 @@ namespace opengm {
omp_unset_lock(&modelLockUnlock);
}
if(iteration_%1==0){
- std::cout<<"loss :"<< -1.0*totalLoss <<"\n";
+ std::cout<<iteration_<<" loss :"<< -1.0*totalLoss <<"\n";
}
// update the weights
const double wChange =updateWeights();
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git
More information about the debian-science-commits
mailing list