[opengm] 298/386: removed deprecated function

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:38:15 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit cfe19a049df340f0ce355dc3c16898bbdbae7676
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date:   Thu Dec 10 16:37:37 2015 -0300

    removed deprecated function
---
 .../auxiliary/lp_solver/lp_solver_interface.hxx    | 12 +++++---
 include/opengm/inference/lpcplex.hxx               |  3 ++
 include/opengm/learning/bundle-optimizer.hxx       | 15 +++++-----
 include/opengm/learning/struct-max-margin.hxx      |  8 +++---
 .../python/opengm/learning/pyLFunctionGen.cxx      |  2 +-
 .../python/opengm/opengmcore/pyFunctionTypes.cxx   | 33 +++++++++++-----------
 6 files changed, 41 insertions(+), 32 deletions(-)

diff --git a/include/opengm/inference/auxiliary/lp_solver/lp_solver_interface.hxx b/include/opengm/inference/auxiliary/lp_solver/lp_solver_interface.hxx
index e27b290..45e4e90 100644
--- a/include/opengm/inference/auxiliary/lp_solver/lp_solver_interface.hxx
+++ b/include/opengm/inference/auxiliary/lp_solver/lp_solver_interface.hxx
@@ -631,10 +631,14 @@ namespace opengm {
 template <class LP_SOLVER_TYPE, class VALUE_TYPE, class INDEX_TYPE, class SOLUTION_ITERATOR_TYPE, class SOLVER_TIMING_TYPE>
 inline LPSolverInterface<LP_SOLVER_TYPE, VALUE_TYPE, INDEX_TYPE, SOLUTION_ITERATOR_TYPE, SOLVER_TIMING_TYPE>::Parameter::Parameter()
    : numberOfThreads_(LPDef::default_numberOfThreads_),
-     verbose_(LPDef::default_verbose_), cutUp_(LPDef::default_cutUp_),
-     epOpt_(LPDef::default_epOpt_), epMrk_(LPDef::default_epMrk_),
-     epRHS_(LPDef::default_epRHS_), epInt_(LPDef::default_epInt_),
-     epAGap_(LPDef::default_epAGap_), epGap_(LPDef::default_epGap_),
+     verbose_(LPDef::default_verbose_), 
+     cutUp_(LPDef::default_cutUp_),
+     epOpt_(LPDef::default_epOpt_), 
+     epMrk_(LPDef::default_epMrk_),
+     epRHS_(LPDef::default_epRHS_), 
+     epInt_(LPDef::default_epInt_),
+     epAGap_(LPDef::default_epAGap_), 
+     epGap_(LPDef::default_epGap_),
      workMem_(LPDef::default_workMem_),
      treeMemoryLimit_(LPDef::default_treeMemoryLimit_),
      timeLimit_(LPDef::default_timeLimit_),
diff --git a/include/opengm/inference/lpcplex.hxx b/include/opengm/inference/lpcplex.hxx
index 07c77f4..0dde612 100644
--- a/include/opengm/inference/lpcplex.hxx
+++ b/include/opengm/inference/lpcplex.hxx
@@ -165,6 +165,9 @@ public:
          disjunctCutLevel_(MIP_CUT_AUTO), 
          gomoryCutLevel_(MIP_CUT_AUTO)
          {
+
+
+            
             numberOfThreads_   = numberOfThreads; 
             integerConstraint_ = false; 
             LPDef lpdef;
diff --git a/include/opengm/learning/bundle-optimizer.hxx b/include/opengm/learning/bundle-optimizer.hxx
index e8e5a7e..e04dd48 100644
--- a/include/opengm/learning/bundle-optimizer.hxx
+++ b/include/opengm/learning/bundle-optimizer.hxx
@@ -75,6 +75,7 @@ public:
 
 		// how to compute the eps for the stopping criterion
 		EpsStrategy epsStrategy;
+        bool verbose_;
 	};
 
 	BundleOptimizer(const Parameter& parameter = Parameter());
@@ -157,12 +158,12 @@ BundleOptimizer<T>::optimize(Oracle& oracle, Weights& w) {
 
 		t++;
 
-        if(oracle.getInfParam().verbose_ )
+        if(_parameter.verbose_)
             std::cout << std::endl << "----------------- iteration      " << t << std::endl;
 
         Weights w_tm1 = w;
 
-        if(oracle.getInfParam().verbose_ ){
+        if(_parameter.verbose_){
             std::cout << "w: ";
             for(size_t i=0; i<w_tm1.size(); ++i)
                 std::cout << w_tm1[i] << " ";
@@ -178,7 +179,7 @@ BundleOptimizer<T>::optimize(Oracle& oracle, Weights& w) {
 		// get current value and gradient
 		oracle(w_tm1, L_w_tm1, a_t);
 
-        if(oracle.getInfParam().verbose_ ){
+        if(_parameter.verbose_){
             std::cout << "       L(w)              is: " << L_w_tm1 << std::endl;
             std::cout << "∂L(w)/∂:  (";
             for(size_t i=0; i<a_t.size(); ++i)
@@ -189,13 +190,13 @@ BundleOptimizer<T>::optimize(Oracle& oracle, Weights& w) {
 		// update smallest observed value of regularized L
 		minValue = std::min(minValue, L_w_tm1 + _parameter.lambda*0.5*dot(w_tm1, w_tm1));
 
-        if(oracle.getInfParam().verbose_ )
+        if(_parameter.verbose_)
             std::cout << " min_i L(w_i) + ½λ|w_i|² is: " << minValue << std::endl;
 
 		// compute hyperplane offset
 		T b_t = L_w_tm1 - dot(w_tm1, a_t);
 
-        if(oracle.getInfParam().verbose_ ){
+        if(_parameter.verbose_){
             std::cout << "adding hyperplane: ( ";
             for(size_t i=0; i<a_t.size(); ++i)
                 std::cout << a_t[i] << " ";
@@ -217,7 +218,7 @@ BundleOptimizer<T>::optimize(Oracle& oracle, Weights& w) {
             norm += w[i]*w[i];
         norm = std::sqrt(norm);
 
-        if(oracle.getInfParam().verbose_ ){
+        if(_parameter.verbose_){
             std::cout << " min_w ℒ(w)   + ½λ|w|²   is: " << minLower << std::endl;
             std::cout << " w* of ℒ(w)   + ½λ|w|²   is: (";
             for(size_t i=0; i<w.size(); ++i)
@@ -237,7 +238,7 @@ BundleOptimizer<T>::optimize(Oracle& oracle, Weights& w) {
 
 		lastMinLower = minLower;
 
-        if(oracle.getInfParam().verbose_ )
+        if(_parameter.verbose_)
             std::cout  << "          ε   is: " << eps_t << std::endl;
 
 		// converged?
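
With this change, verbose output in the bundle optimizer is switched on through the optimizer's own Parameter instead of the inference parameter queried from the oracle. A minimal sketch of how a caller would enable it, assuming the opengm::learning namespace and a double value type (both assumptions; only the verbose_ member itself comes from this commit):

    // Hedged sketch: enable verbose bundle-optimizer output via its Parameter.
    opengm::learning::BundleOptimizer<double>::Parameter param;
    param.verbose_ = true;   // flag added in this commit
    opengm::learning::BundleOptimizer<double> optimizer(param);
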
diff --git a/include/opengm/learning/struct-max-margin.hxx b/include/opengm/learning/struct-max-margin.hxx
index 815c0aa..8af78ed 100644
--- a/include/opengm/learning/struct-max-margin.hxx
+++ b/include/opengm/learning/struct-max-margin.hxx
@@ -104,8 +104,8 @@ private:
 				// set the weights w in E(x,y) and F(x,y)
 				_dataset.getWeights() = w;
 
-                if(_infParam.verbose_ )
-                    std::cout << std::endl << " MODEL : ";
+                //if(_infParam.verbose_ )
+                //    std::cout << std::endl << " MODEL : ";
 
                 #ifdef WITH_OPENMP
                 omp_lock_t modelLock;
@@ -113,8 +113,8 @@ private:
                 #pragma omp parallel for
                 #endif
                 for (int i = 0; i < _dataset.getNumberOfModels(); i++) {
-                    if(_infParam.verbose_ )
-                        std::cout << i;
+                    // if(_infParam.verbose_ )
+                    //     std::cout << i;
 
                     // lock the model
                     #ifdef WITH_OPENMP
diff --git a/src/interfaces/python/opengm/learning/pyLFunctionGen.cxx b/src/interfaces/python/opengm/learning/pyLFunctionGen.cxx
index 03fde2b..e8bb186 100644
--- a/src/interfaces/python/opengm/learning/pyLFunctionGen.cxx
+++ b/src/interfaces/python/opengm/learning/pyLFunctionGen.cxx
@@ -8,7 +8,7 @@
 #include "opengm/graphicalmodel/weights.hxx"
 #include "opengm/functions/learnable/lpotts.hxx"
 #include "opengm/functions/learnable/lunary.hxx"
-#include "opengm/functions/learnable/lsum_of_experts.hxx"
+#include "opengm/functions/learnable/lweightedsum_of_functions.hxx"
 
 #include "../opengmcore/functionGenBase.hxx"
 
diff --git a/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx b/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx
index 1cee266..f5a343a 100644
--- a/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx
+++ b/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx
@@ -28,7 +28,8 @@
 
 #include "opengm/functions/learnable/lpotts.hxx"
 #include "opengm/functions/learnable/lunary.hxx"
-#include "opengm/functions/learnable/lsum_of_experts.hxx"
+#include "opengm/functions/learnable/lweightedsum_of_functions.hxx"
+
 
 using namespace boost::python;
 
@@ -281,7 +282,7 @@ namespace pyfunction{
    }
 
     template<class FUNCTION>
-    FUNCTION * sumOfExpertsConstructor(
+    FUNCTION * weightedSumOfFunctionsConstructor(
         boost::python::object pyShape,
         opengm::python::PyWeights& pyWeights,
         opengm::python::NumpyView<opengm::python::GmIndexType,1> weightIds,
@@ -466,18 +467,18 @@ void export_functiontypes(){
    typedef IndexType LabelType;
 
    // different function types
-   typedef opengm::ExplicitFunction                      <ValueType,IndexType,LabelType> PyExplicitFunction;
-   typedef opengm::PottsFunction                         <ValueType,IndexType,LabelType> PyPottsFunction;
-   typedef opengm::PottsNFunction                        <ValueType,IndexType,LabelType> PyPottsNFunction;
-   typedef opengm::PottsGFunction                        <ValueType,IndexType,LabelType> PyPottsGFunction;
-   typedef opengm::AbsoluteDifferenceFunction            <ValueType,IndexType,LabelType> PyAbsoluteDifferenceFunction;
-   typedef opengm::TruncatedAbsoluteDifferenceFunction   <ValueType,IndexType,LabelType> PyTruncatedAbsoluteDifferenceFunction;
-   typedef opengm::SquaredDifferenceFunction             <ValueType,IndexType,LabelType> PySquaredDifferenceFunction;
-   typedef opengm::TruncatedSquaredDifferenceFunction    <ValueType,IndexType,LabelType> PyTruncatedSquaredDifferenceFunction;
-   typedef opengm::SparseFunction                        <ValueType,IndexType,LabelType> PySparseFunction; 
-   typedef opengm::functions::learnable::LPotts          <ValueType,IndexType,LabelType> PyLPottsFunction;
-   typedef opengm::functions::learnable::LUnary          <ValueType,IndexType,LabelType> PyLUnaryFunction;
-   typedef opengm::functions::learnable::LSumOfExperts   <ValueType,IndexType,LabelType> PyLSumOfExpertsFunction;
+   typedef opengm::ExplicitFunction                                <ValueType,IndexType,LabelType> PyExplicitFunction;
+   typedef opengm::PottsFunction                                   <ValueType,IndexType,LabelType> PyPottsFunction;
+   typedef opengm::PottsNFunction                                  <ValueType,IndexType,LabelType> PyPottsNFunction;
+   typedef opengm::PottsGFunction                                  <ValueType,IndexType,LabelType> PyPottsGFunction;
+   typedef opengm::AbsoluteDifferenceFunction                      <ValueType,IndexType,LabelType> PyAbsoluteDifferenceFunction;
+   typedef opengm::TruncatedAbsoluteDifferenceFunction             <ValueType,IndexType,LabelType> PyTruncatedAbsoluteDifferenceFunction;
+   typedef opengm::SquaredDifferenceFunction                       <ValueType,IndexType,LabelType> PySquaredDifferenceFunction;
+   typedef opengm::TruncatedSquaredDifferenceFunction              <ValueType,IndexType,LabelType> PyTruncatedSquaredDifferenceFunction;
+   typedef opengm::SparseFunction                                  <ValueType,IndexType,LabelType> PySparseFunction; 
+   typedef opengm::functions::learnable::LPotts                    <ValueType,IndexType,LabelType> PyLPottsFunction;
+   typedef opengm::functions::learnable::LUnary                    <ValueType,IndexType,LabelType> PyLUnaryFunction;
+   typedef opengm::functions::learnable::LWeightedSumOfFunctions   <ValueType,IndexType,LabelType> PyLSumOfWeightedFunction;
 
    // vector exporters
    export_function_type_vector<PyExplicitFunction>("ExplicitFunctionVector");
@@ -761,8 +762,8 @@ void export_functiontypes(){
     )
     ;
 
-    FUNCTION_TYPE_EXPORTER_HELPER(PyLSumOfExpertsFunction,"SumOfExpertsFunction")
-    .def("__init__", make_constructor(&pyfunction::sumOfExpertsConstructor<PyLSumOfExpertsFunction> ,default_call_policies(),
+    FUNCTION_TYPE_EXPORTER_HELPER(PyLSumOfWeightedFunction,"SumOfExpertsFunction")
+    .def("__init__", make_constructor(&pyfunction::weightedSumOfFunctionsConstructor<PyLSumOfWeightedFunction> ,default_call_policies(),
          (
             boost::python::arg("shape"),
             boost::python::arg("weight"),

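For downstream C++ code that still includes the removed header, the rename in this commit means updating both the include and the class name, while the Python-level export keeps the old "SumOfExpertsFunction" name. A rough sketch (the concrete ValueType/IndexType/LabelType arguments below are placeholders, not taken from this diff):

    // Hedged sketch of referring to the renamed learnable function type.
    #include <cstddef>
    #include "opengm/functions/learnable/lweightedsum_of_functions.hxx"

    typedef opengm::functions::learnable::LWeightedSumOfFunctions<
        double, std::size_t, std::size_t> WeightedSumOfFunctions;
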
-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git
