[opengm] 254/386: Merge branch 'master' of https://bitbucket.org/jkappes/opengm-learning into maximum_likelihood

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:38:05 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit f96d68a0645237ac1f9c539ea28ef267c4377b75
Merge: a116bff 2ba1371
Author: Janez Ales <janez.ales at iwr.uni-heidelberg.de>
Date:   Fri Jan 16 11:34:48 2015 +0100

    Merge branch 'master' of https://bitbucket.org/jkappes/opengm-learning into maximum_likelihood
    
    Conflicts:
    	src/interfaces/python/opengm/learning/__init__.py
    	src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx

 fubar/brown_horse.py                               |   4 +-
 fubar/{brown_horse.py => brown_horse2.py}          |  35 ++-
 fubar/brown_horse_sp.py                            | 206 +++++++++++++
 fubar/make_grid_potts_dset.py                      | 128 --------
 fubar/make_mc_dset.py                              |  19 ++
 .../{toy_dataset.py => noisy_squares_3_labels.py}  |  34 +--
 fubar/simple_sp.py                                 | 165 +++++++++++
 fubar/toy_dataset.py                               |  34 +--
 include/opengm/graphicalmodel/weights.hxx          |   8 +-
 include/opengm/learning/dataset/dataset.hxx        |  12 +-
 .../opengm/learning/dataset/editabledataset.hxx    |  14 +-
 include/opengm/learning/loss/flexibleloss.hxx      |   6 +-
 include/opengm/learning/subgradient_ssvm.hxx       |  40 +--
 src/interfaces/python/opengm/learning/__init__.py  | 324 ++++++++++++++++++++-
 .../opengm/learning/pyMaxLikelihoodLearner.cxx     |   3 +-
 15 files changed, 787 insertions(+), 245 deletions(-)

diff --cc src/interfaces/python/opengm/learning/__init__.py
index f844a32,7b0aa24..6278b90
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@@ -253,28 -252,17 +252,34 @@@ def structMaxMarginLearner(dataset, reg
          
          return learner
      else:
 -        raise RuntimeError("this learner needs widthCplex or withGurobi")
 +        raise RuntimeError("this learner needs withCplex or withGurobi")
  
  
- def maxLikelihoodLearner(dataset):
-     #raise RuntimeError("not yet implemented / wrapped fully")
+ def maxLikelihoodLearner(dataset, maxIterations=1000, reg=1.0, temp=1.0):
      learnerCls = MaxLikelihood_FlexibleLoss
      learnerParamCls = MaxLikelihood_FlexibleLossParameter
  
+     # merge resolved in favour of the keyword-argument interface above
+     param = learnerParamCls()
+     param.maxIterations = int(maxIterations)
+     param.reg = float(reg)
+     param.temperature = float(temp)
      learner = learnerCls(dataset, param)
          
      return learner
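
For reference, the resolved maxLikelihoodLearner is meant to be called as below. This is a minimal sketch, assuming a dataset built with createDataset()/pushBackInstance() and assuming the generated learner exposes a no-argument learn() method (the bindings in this diff only show its constructor):

    from opengm import learning

    # hypothetical weight count; must match the weighted functions in the models
    dataset = learning.createDataset(numWeights=12)
    # ... fill the dataset via dataset.pushBackInstance(gm, gt) ...

    learner = learning.maxLikelihoodLearner(dataset,
                                            maxIterations=1000,
                                            reg=1.0,
                                            temp=0.5)
    learner.learn()  # assumed entry point, not shown in this diff
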
@@@ -378,3 -366,308 +383,308 @@@ def lPottsFunctions(weights, numberOfLa
      res.__dict__['_features_'] = wid
      res.__dict__['_weights_'] = ff
      return res
+ 
+ 
+ def getPbar(size, name):
+     widgets = ['%s: '%name, Percentage(), ' ', Bar(marker='0',left='[',right=']'),
+                ' ', ETA(), ' ', FileTransferSpeed()] #see docs for other options
+     pbar = ProgressBar(widgets=widgets, maxval=size)
+     return pbar
+ 
+ def secondOrderImageDataset(imgs, gts, numberOfLabels, fUnary, fBinary, addConstFeature, trainFraction=0.75):
+ 
+     # train test
+     nImg = len(imgs)
+     nTrain = int(float(nImg)*trainFraction+0.5)
+     nTest = (nImg-nTrain)
+     
+     def getFeat(fComp, im):
+         res = []
+         for f in fComp:
+             r = f(im)
+             if r.ndim == 2:
+                 r = r[:,:, None]
+             res.append(r)
+         return res
+ 
+     # compute features for a single image
+     tImg = imgs[0]
+     unaryFeat = getFeat(fUnary, tImg)
+     unaryFeat = numpy.nan_to_num(numpy.concatenate(unaryFeat,axis=2).view(numpy.ndarray))
+     nUnaryFeat = unaryFeat.shape[-1] + int(addConstFeature)
+     nUnaryFeat *= numberOfLabels - int(numberOfLabels==2)
+ 
+     if len(fBinary)>0:
+         binaryFeat = getFeat(fBinary, tImg)
+         binaryFeat = numpy.nan_to_num(numpy.concatenate(binaryFeat,axis=2).view(numpy.ndarray))
+         nBinaryFeat = binaryFeat.shape[-1] + int(addConstFeature)
+     else:
+         nBinaryFeat = 0
+     # define nWeights in both branches (fBinary may be empty)
+     nWeights = nUnaryFeat + nBinaryFeat
+ 
+     print "------------------------------------------------"
+     print "nTrain",nTrain,"nTest",nTest
+     print "nWeights",nWeights,"(",nUnaryFeat,nBinaryFeat,")"
+     print "------------------------------------------------"
+ 
+     train_set = []
+     tentative_test_set = []
+ 
+     for i,(img,gt) in enumerate(zip(imgs,gts)):
+         if(i<nTrain):
+             train_set.append((img,gt))
+         else:
+             tentative_test_set.append((img,gt))
+ 
+ 
+     dataset = createDataset(numWeights=nWeights)
+     weights = dataset.getWeights()
+     uWeightIds = numpy.arange(nUnaryFeat ,dtype='uint64')
+     if numberOfLabels != 2:
+         uWeightIds = uWeightIds.reshape([numberOfLabels,-1])
+     else:
+         uWeightIds = uWeightIds.reshape([1,-1])
+     bWeightIds = numpy.arange(start=nUnaryFeat,stop=nWeights,dtype='uint64')
+ 
+     def makeModel(img,gt):
+         shape = gt.shape[0:2]
+         numVar = shape[0] * shape[1]
+ 
+         # make model
+         gm = graphicalModel(numpy.ones(numVar)*numberOfLabels)
+ 
+         # compute features
+         unaryFeat = getFeat(fUnary, img)
+         unaryFeat = numpy.nan_to_num(numpy.concatenate(unaryFeat,axis=2).view(numpy.ndarray))
+         unaryFeat  = unaryFeat.reshape([numVar,-1])
+ 
+         # add unaries
+         lUnaries = lUnaryFunctions(weights =weights,numberOfLabels = numberOfLabels, 
+                                     features=unaryFeat, weightIds = uWeightIds,
+                                     featurePolicy= FeaturePolicy.sharedBetweenLabels,
+                                     makeFirstEntryConst=numberOfLabels==2, addConstFeature=addConstFeature)
+         fids = gm.addFunctions(lUnaries)
+         gm.addFactors(fids, numpy.arange(numVar))
+ 
+ 
+         if len(fBinary)>0:
+             binaryFeat = getFeat(fBinary, img)
+             binaryFeat = numpy.nan_to_num(numpy.concatenate(binaryFeat,axis=2).view(numpy.ndarray))
+             binaryFeat  = binaryFeat.reshape([numVar,-1])
+ 
+             # add second order
+             vis2Order=gridVis(shape[0:2],True)
+ 
+             fU = binaryFeat[vis2Order[:,0],:]
+             fV = binaryFeat[vis2Order[:,1],:]
+             fB = (fU + fV) / 2.0  # mean of the two endpoint features (precedence fix)
+             lp = lPottsFunctions(weights=weights, numberOfLabels=numberOfLabels,
+                                           features=fB, weightIds=bWeightIds,
+                                           addConstFeature=addConstFeature)
+             gm.addFactors(gm.addFunctions(lp), vis2Order) 
+ 
+         return gm
+ 
+     # make training models
+     pbar = getPbar(nTrain,"Training Models")
+     pbar.start()
+     for i,(img,gt) in enumerate(train_set):
+         gm = makeModel(img, gt)
+         dataset.pushBackInstance(gm,gt.reshape(-1).astype(label_type))
+         pbar.update(i)
+     pbar.finish()
+ 
+ 
+     # make test models
+     test_set = []
+     pbar = getPbar(nTest,"Test Models")
+     pbar.start()
+     for i,(img,gt) in enumerate(tentative_test_set):
+         gm = makeModel(img, gt)
+         test_set.append((img, gt, gm))
+         pbar.update(i)
+     pbar.finish()
+ 
+     return dataset, test_set
+ 
+ 
+ def superpixelDataset(imgs,sps, gts, numberOfLabels, fUnary, fBinary, addConstFeature, trainFraction=0.75):
+     try:
+         import vigra
+     except ImportError:
+         raise ImportError("cannot import vigra which is needed for superpixelDataset")
+ 
+     # train test
+     nImg = len(imgs)
+     nTrain = int(float(nImg)*trainFraction+0.5)
+     nTest = (nImg-nTrain)
+     
+     def getFeat(fComp, im, topoShape=False):
+         res = []
+         if(topoShape):
+             shape = im.shape[0:2]
+             tshape = [2*s-1 for s in shape]
+             iiimg = vigra.sampling.resize(im, tshape)
+         else:
+             iiimg = im
+         for f in fComp:
+             r = f(iiimg)
+             if r.ndim == 2:
+                 r = r[:,:, None]
+             res.append(r)
+         return res
+ 
+     # compute features for a single image
+     tImg = imgs[0]
+     unaryFeat = getFeat(fUnary, tImg)
+     unaryFeat = numpy.nan_to_num(numpy.concatenate(unaryFeat,axis=2).view(numpy.ndarray))
+     nUnaryFeat = unaryFeat.shape[-1] + int(addConstFeature)
+     nUnaryFeat *= numberOfLabels - int(numberOfLabels==2)
+     if len(fBinary)>0:
+         binaryFeat = getFeat(fBinary, tImg)
+         binaryFeat = numpy.nan_to_num(numpy.concatenate(binaryFeat,axis=2).view(numpy.ndarray))
+         nBinaryFeat = binaryFeat.shape[-1] + int(addConstFeature)
+     else:
+         nBinaryFeat = 0
+ 
+     nWeights  = nUnaryFeat + nBinaryFeat
+ 
+     print "------------------------------------------------"
+     print "nTrain",nTrain,"nTest",nTest
+     print "nWeights",nWeights,"(",nUnaryFeat,nBinaryFeat,")"
+     print "------------------------------------------------"
+ 
+     train_set = []
+     tentative_test_set = []
+ 
+     for i,(img,sp,gt) in enumerate(zip(imgs,sps,gts)):
+         if(i<nTrain):
+             train_set.append((img,sp,gt))
+         else:
+             tentative_test_set.append((img,sp,gt))
+ 
+ 
+     dataset = createDataset(numWeights=nWeights)
+     weights = dataset.getWeights()
+     uWeightIds = numpy.arange(nUnaryFeat ,dtype='uint64')
+     if numberOfLabels != 2:
+         uWeightIds = uWeightIds.reshape([numberOfLabels,-1])
+     else:
+         uWeightIds = uWeightIds.reshape([1,-1])
+ 
+     if len(fBinary)>0:
+         bWeightIds = numpy.arange(start=nUnaryFeat,stop=nWeights,dtype='uint64')
+ 
+     def makeModel(img,sp,gt):
+         assert sp.min() == 0
+         shape = img.shape[0:2]
+         gg = vigra.graphs.gridGraph(shape)
+         rag = vigra.graphs.regionAdjacencyGraph(gg,sp)
+         numVar = rag.nodeNum
+         assert rag.nodeNum == rag.maxNodeId +1
+ 
+         # make model
+         gm = graphicalModel(numpy.ones(numVar)*numberOfLabels)
+ 
+         assert gm.numberOfVariables == rag.nodeNum 
+         assert gm.numberOfVariables == rag.maxNodeId +1
+ 
+         # compute features
+         unaryFeat = getFeat(fUnary, img)
+         unaryFeat = numpy.nan_to_num(numpy.concatenate(unaryFeat,axis=2).view(numpy.ndarray)).astype('float32')
+         unaryFeat = vigra.taggedView(unaryFeat,'xyc')
+         # accumulate the per-pixel unary features over superpixel regions
+         accUnaryFeat = rag.accumulateNodeFeatures(unaryFeat)
+ 
+         # add unaries
+         lUnaries = lUnaryFunctions(weights =weights,numberOfLabels = numberOfLabels, 
+                                             features=accUnaryFeat, weightIds = uWeightIds,
+                                             featurePolicy= FeaturePolicy.sharedBetweenLabels,
+                                             makeFirstEntryConst=numberOfLabels==2, addConstFeature=addConstFeature)
+         fids = gm.addFunctions(lUnaries)
+         gm.addFactors(fids, numpy.arange(numVar))
+ 
+         if len(fBinary)>0:
+             binaryFeat = getFeat(fBinary, img, topoShape=False)
+             binaryFeat = numpy.nan_to_num(numpy.concatenate(binaryFeat,axis=2).view(numpy.ndarray)).astype('float32')
+             edgeFeat = vigra.graphs.edgeFeaturesFromImage(gg, binaryFeat)
+             accBinaryFeat = rag.accumulateEdgeFeatures(edgeFeat)
+ 
+             uvIds =  numpy.sort(rag.uvIds(), axis=1)
+             assert uvIds.min()==0
+             assert uvIds.max()==gm.numberOfVariables-1
+ 
+             lp = lPottsFunctions(weights=weights, numberOfLabels=numberOfLabels,
+                                           features=accBinaryFeat, weightIds=bWeightIds,
+                                           addConstFeature=addConstFeature)
+             fids = gm.addFunctions(lp)
+             gm.addFactors(fids, uvIds) 
+ 
+         return gm
+ 
+     # make training models
+     pbar = getPbar(nTrain,"Training Models")
+     pbar.start()
+     for i,(img,sp,gt) in enumerate(train_set):
+         gm = makeModel(img,sp, gt)
+         dataset.pushBackInstance(gm,gt.astype(label_type))
+         pbar.update(i)
+     pbar.finish()
+ 
+ 
+     # make test models
+     test_set = []
+     pbar = getPbar(nTest,"Test Models")
+     pbar.start()
+     for i,(img,sp,gt) in enumerate(tentative_test_set):
+         gm = makeModel(img,sp, gt)
+         test_set.append((img, sp, gm))
+         pbar.update(i)
+     pbar.finish()
+ 
 -    return dataset, test_set
++    return dataset, test_set
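
Both dataset builders take lists of feature callables, each mapping an image to a 2-d or 3-d (channels-last) feature array. A minimal sketch of the intended call, with illustrative vigra filters (the filter choices are assumptions, not part of this diff):

    import vigra
    from opengm import learning

    # illustrative feature extractors; any image -> array callables work
    fUnary  = [lambda img: vigra.filters.gaussianSmoothing(img, 1.0)]
    fBinary = [lambda img: vigra.filters.gaussianGradientMagnitude(img, 1.0)]

    imgs = [...]  # list of float32 images
    gts  = [...]  # matching ground-truth label images, same 2-d shape

    dataset, test_set = learning.secondOrderImageDataset(
        imgs=imgs, gts=gts, numberOfLabels=2,
        fUnary=fUnary, fBinary=fBinary,
        addConstFeature=True, trainFraction=0.75)

superpixelDataset is driven the same way but additionally takes per-image superpixel label maps (sps) and requires vigra for the region adjacency graph.
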
diff --cc src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
index 00b0667,6c17bbb..c67825c
--- a/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
+++ b/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
@@@ -57,7 -30,9 +57,8 @@@ namespace opengm
          const std::string paramClsName = clsName + std::string("Parameter");
  
          bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
 +	  //.def("__init__", make_constructor(&pyMaxLikelihoodParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
+             //.def_readwrite("maxIterations", &PyLearnerParam::maxNumSteps_)
 -            .def_readwrite("reg", &PyLearnerParam::reg_)
 -            .def_readwrite("temperature", &PyLearnerParam::temperature_)
          ;
  
          boost::python::class_<PyLearner>( clsName.c_str(), boost::python::init<DatasetType &, const PyLearnerParam &>() )
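
With the maxIterations/reg/temperature properties kept read-write above, the parameter object can be configured directly from Python. A minimal sketch of the bound surface (class names as generated in this file; the attribute-to-member mapping follows the def_readwrite calls):

    from opengm import learning

    param = learning.MaxLikelihood_FlexibleLossParameter()
    param.maxIterations = 1000  # backed by maxNumSteps_
    param.reg = 1.0             # backed by reg_
    param.temperature = 0.5     # backed by temperature_

    # dataset: a FlexibleLoss dataset, e.g. from createDataset(...)
    dataset = learning.createDataset(numWeights=12)
    learner = learning.MaxLikelihood_FlexibleLoss(dataset, param)
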
