[opengm] 17/386: checkin of linear and quadratic solver and Gurobi backend for learning

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:34:59 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit cd0436e7ea6a16df6ef98cc4f751f108fd903e8f
Author: Jan Funke <funke at ini.ch>
Date:   Thu Jun 19 18:29:44 2014 -0400

    checkin of linear and quadratic solver and Gurobi backend for learning
---
 include/opengm/learning/bundle-optimizer.hxx       |  19 +
 include/opengm/learning/solver/GurobiBackend.h     | 451 +++++++++++++++++++++
 include/opengm/learning/solver/LinearConstraint.h  |  93 +++++
 include/opengm/learning/solver/LinearConstraints.h | 118 ++++++
 include/opengm/learning/solver/LinearObjective.h   |  24 ++
 .../opengm/learning/solver/LinearSolverBackend.h   |  84 ++++
 .../opengm/learning/solver/QuadraticObjective.h    | 180 ++++++++
 .../learning/solver/QuadraticSolverBackend.h       |  28 ++
 .../learning/solver/QuadraticSolverFactory.h       |  29 ++
 .../learning/solver/QuadraticSolverParameters.h    |  15 +
 include/opengm/learning/solver/Relation.h          |  20 +
 include/opengm/learning/solver/Sense.h             |  20 +
 include/opengm/learning/solver/Solution.h          |  49 +++
 include/opengm/learning/solver/VariableType.h      |  18 +
 src/unittest/CMakeLists.txt                        |   7 +-
 15 files changed, 1153 insertions(+), 2 deletions(-)

diff --git a/include/opengm/learning/bundle-optimizer.hxx b/include/opengm/learning/bundle-optimizer.hxx
index dd8de4e..72285f8 100644
--- a/include/opengm/learning/bundle-optimizer.hxx
+++ b/include/opengm/learning/bundle-optimizer.hxx
@@ -2,6 +2,8 @@
 #ifndef OPENGM_LEARNING_BUNDLE_OPTIMIZER_HXX
 #define OPENGM_LEARNING_BUNDLE_OPTIMIZER_HXX
 
+#include "solver/QuadraticSolverFactory.h"
+
 namespace opengm {
 
 namespace learning {
@@ -36,6 +38,10 @@ public:
 		unsigned int steps;
 	};
 
+	BundleOptimizer();
+
+	~BundleOptimizer();
+
 	/**
 	 * Start the bundle method optimization on the given dataset. It is assumed 
 	 * that the models in the dataset were already augmented by the loss.
@@ -50,9 +56,22 @@ private:
 	void findMinLowerBound(std::vector<ValueType>& w, ValueType& value);
 
 	ValueType dot(const std::vector<ValueType>& a, const std::vector<ValueType>& b);
+
+	solver::QuadraticSolverBackend* _solver;
 };
 
 template <typename T>
+BundleOptimizer<T>::BundleOptimizer() :
+	_solver(0) {}
+
+template <typename T>
+BundleOptimizer<T>::~BundleOptimizer() {
+
+	if (_solver)
+		delete _solver;
+}
+
+template <typename T>
 template <typename DatasetType>
 OptimizerResult
 BundleOptimizer<T>::optimize(const DatasetType& dataset, typename DatasetType::ModelParameters& w) {
diff --git a/include/opengm/learning/solver/GurobiBackend.h b/include/opengm/learning/solver/GurobiBackend.h
new file mode 100644
index 0000000..488f5f5
--- /dev/null
+++ b/include/opengm/learning/solver/GurobiBackend.h
@@ -0,0 +1,451 @@
+#ifndef OPENGM_LEARNING_SOLVER_GUROBI_SOLVER_H__
+#define OPENGM_LEARNING_SOLVER_GUROBI_SOLVER_H__
+
+#ifdef WITH_GUROBI
+
+#include <string>
+
+#include <gurobi_c++.h>
+
+#include "LinearConstraints.h"
+#include "QuadraticObjective.h"
+#include "QuadraticSolverBackend.h"
+#include "Sense.h"
+#include "Solution.h"
+
+namespace opengm {
+namespace learning {
+namespace solver {
+
+/**
+ * Gurobi interface to solve the following (integer) quadratic program:
+ *
+ * min  <a,x> + xQx
+ * s.t. Ax  == b
+ *      Cx  <= d
+ *      optionally: x_i \in {0,1} for all i
+ *
+ * Where (A,b) describes all linear equality constraints, (C,d) all linear
+ * inequality constraints and x is the solution vector. a is a real-valued
+ * vector denoting the coefficients of the objective and Q a PSD matrix giving
+ * the quadratic coefficients of the objective.
+ */
+class GurobiBackend : public QuadraticSolverBackend {
+
+public:
+
+	struct Parameter {
+
+		Parameter() :
+			mipGap(0.0001),
+			mipFocus(0),
+			numThreads(0),
+			verbose(false) {}
+
+		// The Gurobi relative optimality gap.
+		double mipGap;
+
+		// The Gurobi MIP focus: 0 = balanced, 1 = feasible solutions, 2 = 
+		// optimal solution, 3 = bound.
+		unsigned int mipFocus;
+
+		// The number of threads to be used by Gurobi. The default (0) uses all 
+		// available CPUs.
+		unsigned int numThreads;
+
+		// Show the gurobi output.
+		bool verbose;
+	};
+
+	GurobiBackend(const Parameter& parameter = Parameter());
+
+	virtual ~GurobiBackend();
+
+	///////////////////////////////////
+	// solver backend implementation //
+	///////////////////////////////////
+
+	void initialize(
+			unsigned int numVariables,
+			VariableType variableType);
+
+	void initialize(
+			unsigned int                                numVariables,
+			VariableType                                defaultVariableType,
+			const std::map<unsigned int, VariableType>& specialVariableTypes);
+
+	void setObjective(const LinearObjective& objective);
+
+	void setObjective(const QuadraticObjective& objective);
+
+	void setConstraints(const LinearConstraints& constraints);
+
+	void addConstraint(const LinearConstraint& constraint);
+
+	bool solve(Solution& solution, double& value, std::string& message);
+
+private:
+
+	//////////////
+	// internal //
+	//////////////
+
+	// dump the current problem to a file
+	void dumpProblem(std::string filename);
+
+	// set the optimality gap
+	void setMIPGap(double gap);
+
+	// set the MIP focus
+	void setMIPFocus(unsigned int focus);
+
+	// set the number of threads to use
+	void setNumThreads(unsigned int numThreads);
+
+	/**
+	 * Enable solver output.
+	 */
+	void setVerbose(bool verbose);
+
+	// size of a and x
+	unsigned int _numVariables;
+
+	// rows in A
+	unsigned int _numEqConstraints;
+
+	// rows in C
+	unsigned int _numIneqConstraints;
+
+	Parameter _parameter;
+
+	// the GRB environment
+	GRBEnv _env;
+
+	// the (binary) variables x
+	GRBVar* _variables;
+
+	// the objective
+	GRBQuadExpr _objective;
+
+	std::vector<GRBConstr> _constraints;
+
+	// the GRB model containing the objective and constraints
+	GRBModel _model;
+
+	// the verbosity of the output
+	int _verbosity;
+
+	// a value by which to scale the objective
+	double _scale;
+};
+
+GurobiBackend::GurobiBackend(const Parameter& parameter) :
+	_parameter(parameter),
+	_variables(0),
+	_model(_env) {
+}
+
+GurobiBackend::~GurobiBackend() {
+
+	std::cout << "destructing gurobi solver..." << std::endl;
+
+	if (_variables)
+		delete[] _variables;
+}
+
+void
+GurobiBackend::initialize(
+		unsigned int numVariables,
+		VariableType variableType) {
+
+	initialize(numVariables, variableType, std::map<unsigned int, VariableType>());
+}
+
+void
+GurobiBackend::initialize(
+		unsigned int                                numVariables,
+		VariableType                                defaultVariableType,
+		const std::map<unsigned int, VariableType>& specialVariableTypes) {
+
+	if (_parameter.verbose)
+		setVerbose(true);
+	else
+		setVerbose(false);
+
+	setMIPGap(_parameter.mipGap);
+
+	if (_parameter.mipFocus <= 3)
+		setMIPFocus(_parameter.mipFocus);
+	else
+		std::cerr << "Invalid value for MIP focus!" << std::endl;
+
+	setNumThreads(_parameter.numThreads);
+
+	_numVariables = numVariables;
+
+	// delete previous variables
+	if (_variables)
+		delete[] _variables;
+
+	// add new variables to the model
+	if (defaultVariableType == Binary) {
+
+		std::cout << "creating " << _numVariables << " binary variables" << std::endl;
+
+		_variables = _model.addVars(_numVariables, GRB_BINARY);
+
+		_model.update();
+
+	} else if (defaultVariableType == Continuous) {
+
+		std::cout << "creating " << _numVariables << " continuous variables" << std::endl;
+
+		_variables = _model.addVars(_numVariables, GRB_CONTINUOUS);
+
+		_model.update();
+
+		// remove default lower bound on variables
+		for (unsigned int i = 0; i < _numVariables; i++)
+			_variables[i].set(GRB_DoubleAttr_LB, -GRB_INFINITY);
+
+	} else if (defaultVariableType == Integer) {
+
+		std::cout << "creating " << _numVariables << " integer variables" << std::endl;
+
+		_variables = _model.addVars(_numVariables, GRB_INTEGER);
+
+		_model.update();
+
+		// remove default lower bound on variables
+		for (unsigned int i = 0; i < _numVariables; i++)
+			_variables[i].set(GRB_DoubleAttr_LB, -GRB_INFINITY);
+	}
+
+	// handle special variable types
+	typedef std::map<unsigned int, VariableType>::const_iterator VarTypeIt;
+	for (VarTypeIt i = specialVariableTypes.begin(); i != specialVariableTypes.end(); i++) {
+
+		unsigned int v = i->first;
+		VariableType type = i->second;
+
+		char t = (type == Binary ? 'B' : (type == Integer ? 'I' : 'C'));
+		_variables[v].set(GRB_CharAttr_VType, t);
+	}
+
+	std::cout << "creating " << _numVariables << " coefficients" << std::endl;
+}
+
+void
+GurobiBackend::setObjective(const LinearObjective& objective) {
+
+	setObjective((QuadraticObjective)objective);
+}
+
+void
+GurobiBackend::setObjective(const QuadraticObjective& objective) {
+
+	try {
+
+		// set sense of objective
+		if (objective.getSense() == Minimize)
+			_model.set(GRB_IntAttr_ModelSense, 1);
+		else
+			_model.set(GRB_IntAttr_ModelSense, -1);
+
+		// set the constant value of the objective
+		_objective = objective.getConstant();
+
+		std::cout << "setting linear coefficients" << std::endl;
+
+		_objective.addTerms(&objective.getCoefficients()[0], _variables, _numVariables);
+
+		// set the quadratic coefficients for all pairs of variables
+		std::cout << "setting quadratic coefficients" << std::endl;
+
+		typedef std::map<std::pair<unsigned int, unsigned int>, double>::const_iterator QuadCoefIt;
+		for (QuadCoefIt i = objective.getQuadraticCoefficients().begin(); i != objective.getQuadraticCoefficients().end(); i++) {
+
+			const std::pair<unsigned int, unsigned int>& variables = i->first;
+			float value = i->second;
+
+			if (value != 0)
+				_objective += _variables[variables.first]*_variables[variables.second]*value;
+		}
+
+		_model.setObjective(_objective);
+
+		_model.update();
+
+	} catch (GRBException e) {
+
+		std::cerr << "error: " << e.getMessage() << std::endl;
+	}
+}
+
+void
+GurobiBackend::setConstraints(const LinearConstraints& constraints) {
+
+	// remove previous constraints
+	for (std::vector<GRBConstr>::iterator constraint = _constraints.begin(); constraint != _constraints.end(); constraint++)
+		_model.remove(*constraint);
+	_constraints.clear();
+
+	_model.update();
+
+	// allocate memory for new constraints
+	_constraints.reserve(constraints.size());
+
+	try {
+
+		std::cout << "setting " << constraints.size() << " constraints" << std::endl;
+
+		unsigned int j = 0;
+		for (LinearConstraints::const_iterator constraint = constraints.begin(); constraint != constraints.end(); constraint++) {
+
+			// create the lhs expression
+			GRBLinExpr lhsExpr;
+
+			// set the coefficients
+			typedef std::map<unsigned int, double>::const_iterator CoefIt;
+			for (CoefIt pair = constraint->getCoefficients().begin(); pair != constraint->getCoefficients().end(); pair++)
+				lhsExpr += pair->second*_variables[pair->first];
+
+			// add to the model
+			_constraints.push_back(
+					_model.addConstr(
+						lhsExpr,
+						(constraint->getRelation() == LessEqual ? GRB_LESS_EQUAL :
+								(constraint->getRelation() == GreaterEqual ? GRB_GREATER_EQUAL :
+										GRB_EQUAL)),
+						constraint->getValue()));
+
+			j++;
+		}
+
+		_model.update();
+
+	} catch (GRBException e) {
+
+		std::cerr << "error: " << e.getMessage() << std::endl;
+	}
+}
+
+void
+GurobiBackend::addConstraint(const LinearConstraint& constraint) {
+
+	try {
+
+		std::cout << "adding a constraint" << std::endl;
+
+
+		// create the lhs expression
+		GRBLinExpr lhsExpr;
+
+		// set the coefficients
+		typedef std::map<unsigned int, double>::const_iterator CoefIt;
+		for (CoefIt pair = constraint.getCoefficients().begin(); pair != constraint.getCoefficients().end(); pair++)
+			lhsExpr += pair->second*_variables[pair->first];
+
+		// add to the model
+		_constraints.push_back(
+				_model.addConstr(
+					lhsExpr,
+					(constraint.getRelation() == LessEqual ? GRB_LESS_EQUAL :
+							(constraint.getRelation() == GreaterEqual ? GRB_GREATER_EQUAL :
+									GRB_EQUAL)),
+					constraint.getValue()));
+
+		_model.update();
+
+	} catch (GRBException e) {
+
+		std::cerr << "error: " << e.getMessage() << std::endl;
+	}
+}
+
+bool
+GurobiBackend::solve(Solution& x, double& value, std::string& msg) {
+
+	try {
+
+		_model.optimize();
+
+		int status = _model.get(GRB_IntAttr_Status);
+
+		if (status != GRB_OPTIMAL) {
+			msg = "Optimal solution *NOT* found";
+			return false;
+		} else
+			msg = "Optimal solution found";
+
+		// extract solution
+
+		x.resize(_numVariables);
+		for (unsigned int i = 0; i < _numVariables; i++)
+			x[i] = _variables[i].get(GRB_DoubleAttr_X);
+
+		// get current value of the objective
+		value = _model.get(GRB_DoubleAttr_ObjVal);
+
+		x.setValue(value);
+
+	} catch (GRBException e) {
+
+		std::cerr << "error: " << e.getMessage() << std::endl;
+
+		msg = e.getMessage();
+
+		return false;
+	}
+
+	return true;
+}
+
+void
+GurobiBackend::setMIPGap(double gap) {
+
+	_model.getEnv().set(GRB_DoubleParam_MIPGap, gap);
+}
+
+void
+GurobiBackend::setMIPFocus(unsigned int focus) {
+
+	_model.getEnv().set(GRB_IntParam_MIPFocus, focus);
+}
+
+void
+GurobiBackend::setNumThreads(unsigned int numThreads) {
+
+	_model.getEnv().set(GRB_IntParam_Threads, numThreads);
+}
+
+void
+GurobiBackend::setVerbose(bool verbose) {
+
+	// setup GRB environment
+	if (verbose)
+		_model.getEnv().set(GRB_IntParam_OutputFlag, 1);
+	else
+		_model.getEnv().set(GRB_IntParam_OutputFlag, 0);
+}
+
+void
+GurobiBackend::dumpProblem(std::string filename) {
+
+	try {
+
+		_model.write(filename);
+
+	} catch (GRBException e) {
+
+		std::cerr << "error: " << e.getMessage() << std::endl;
+	}
+}
+
+}}} // namespace opengm::learning::solver
+
+#endif // WITH_GUROBI
+
+#endif // OPENGM_LEARNING_SOLVER_GUROBI_SOLVER_H__
+
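
For illustration only (not part of this commit): a minimal sketch of how the
backend above could be driven end to end. It assumes the translation unit is
compiled with WITH_GUROBI defined and linked against gurobi_c++ (see the CMake
change at the bottom of this commit), that opengm's include/ directory is on
the include path, and that a valid Gurobi licence is available at runtime; the
small QP itself is made up for this example.

    #include <iostream>
    #include <string>
    #include <opengm/learning/solver/GurobiBackend.h>

    int main() {

        using namespace opengm::learning::solver;

        // minimize x0^2 + x1^2 - x0 - 2*x1
        // s.t.     x0 + x1 <= 1,  x0 >= 0,  x1 >= 0
        GurobiBackend backend;
        backend.initialize(2, Continuous);

        QuadraticObjective objective(2);
        objective.setCoefficient(0, -1.0);             // linear part a
        objective.setCoefficient(1, -2.0);
        objective.setQuadraticCoefficient(0, 0, 1.0);  // diagonal of Q
        objective.setQuadraticCoefficient(1, 1, 1.0);
        objective.setSense(Minimize);
        backend.setObjective(objective);

        // x0 + x1 <= 1
        LinearConstraint sum;
        sum.setCoefficient(0, 1.0);
        sum.setCoefficient(1, 1.0);
        sum.setRelation(LessEqual);
        sum.setValue(1.0);

        LinearConstraints constraints;
        constraints.add(sum);
        backend.setConstraints(constraints);

        // initialize() removes the default lower bound on continuous
        // variables, so non-negativity has to be added explicitly
        for (unsigned int i = 0; i < 2; i++) {
            LinearConstraint nonNeg;
            nonNeg.setCoefficient(i, 1.0);
            nonNeg.setRelation(GreaterEqual);
            nonNeg.setValue(0.0);
            backend.addConstraint(nonNeg);
        }

        Solution x;
        double value;
        std::string message;
        if (backend.solve(x, value, message))
            std::cout << "x = (" << x[0] << ", " << x[1] << "), objective = " << value << std::endl;
        else
            std::cerr << message << std::endl;

        return 0;
    }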
+
diff --git a/include/opengm/learning/solver/LinearConstraint.h b/include/opengm/learning/solver/LinearConstraint.h
new file mode 100644
index 0000000..5ffa7f3
--- /dev/null
+++ b/include/opengm/learning/solver/LinearConstraint.h
@@ -0,0 +1,93 @@
+#ifndef INFERENCE_LINEAR_CONSTRAINT_H__
+#define INFERENCE_LINEAR_CONSTRAINT_H__
+
+#include <map>
+
+#include "Relation.h"
+
+namespace opengm {
+namespace learning {
+namespace solver {
+
+/**
+ * A sparse linear constraint.
+ */
+class LinearConstraint {
+
+public:
+
+	LinearConstraint();
+
+	void setCoefficient(unsigned int varNum, double coef);
+
+	void setRelation(Relation relation);
+
+	void setValue(double value);
+
+	const std::map<unsigned int, double>& getCoefficients() const;
+
+	const Relation& getRelation() const;
+
+	double getValue() const;
+
+private:
+
+	std::map<unsigned int, double> _coefs;
+
+	Relation _relation;
+
+	double _value;
+};
+
+LinearConstraint::LinearConstraint() :
+	_relation(LessEqual) {}
+
+void
+LinearConstraint::setCoefficient(unsigned int varNum, double coef) {
+
+	if (coef == 0) {
+
+		std::map<unsigned int, double>::iterator i = _coefs.find(varNum);
+		if (i != _coefs.end())
+			_coefs.erase(i);
+
+	} else {
+
+		_coefs[varNum] = coef;
+	}
+}
+
+void
+LinearConstraint::setRelation(Relation relation) {
+
+	_relation = relation;
+}
+
+void
+LinearConstraint::setValue(double value) {
+
+	_value = value;
+}
+
+const std::map<unsigned int, double>&
+LinearConstraint::getCoefficients() const {
+
+	return _coefs;
+}
+
+const Relation&
+LinearConstraint::getRelation() const {
+
+	return _relation;
+}
+
+double
+LinearConstraint::getValue() const {
+
+	return _value;
+}
+
+}}} // namespace opengm::learning::solver
+
+#endif // INFERENCE_LINEAR_CONSTRAINT_H__
+
diff --git a/include/opengm/learning/solver/LinearConstraints.h b/include/opengm/learning/solver/LinearConstraints.h
new file mode 100644
index 0000000..3468643
--- /dev/null
+++ b/include/opengm/learning/solver/LinearConstraints.h
@@ -0,0 +1,118 @@
+#ifndef INFERENCE_LINEAR_CONSTRAINTS_H__
+#define INFERENCE_LINEAR_CONSTRAINTS_H__
+
+#include "LinearConstraint.h"
+
+namespace opengm {
+namespace learning {
+namespace solver {
+
+class LinearConstraints {
+
+	typedef std::vector<LinearConstraint> linear_constraints_type;
+
+public:
+
+	typedef linear_constraints_type::iterator       iterator;
+
+	typedef linear_constraints_type::const_iterator const_iterator;
+
+	/**
+	 * Create a new set of linear constraints and allocate enough memory to hold
+	 * 'size' linear constraints. More or less constraints can be added, but
+	 * memory might be wasted (if more allocated than necessary) or unnecessary
+	 * reallocations might occur (if more added than allocated).
+	 *
+	 * @param size The number of linear constraints to reserve memory for.
+	 */
+	LinearConstraints(size_t size = 0);
+
+	/**
+	 * Remove all constraints from this set of linear constraints.
+	 */
+	void clear() { _linearConstraints.clear(); }
+
+	/**
+	 * Add a linear constraint.
+	 *
+	 * @param linearConstraint The linear constraint to add.
+	 */
+	void add(const LinearConstraint& linearConstraint);
+
+	/**
+	 * Add a set of linear constraints.
+	 *
+	 * @param linearConstraints The set of linear constraints to add.
+	 */
+	void addAll(const LinearConstraints& linearConstraints);
+
+	/**
+	 * @return The number of linear constraints in this set.
+	 */
+	unsigned int size() const { return _linearConstraints.size(); }
+
+	const const_iterator begin() const { return _linearConstraints.begin(); }
+
+	iterator begin() { return _linearConstraints.begin(); }
+
+	const const_iterator end() const { return _linearConstraints.end(); }
+
+	iterator end() { return _linearConstraints.end(); }
+
+	const LinearConstraint& operator[](size_t i) const { return _linearConstraints[i]; }
+
+	LinearConstraint& operator[](size_t i) { return _linearConstraints[i]; }
+
+	/**
+	 * Get a list of indices of linear constraints that use the given variables.
+	 */
+	std::vector<unsigned int> getConstraints(const std::vector<unsigned int>& variableIds);
+
+private:
+
+	linear_constraints_type _linearConstraints;
+};
+
+LinearConstraints::LinearConstraints(size_t size) {
+
+	_linearConstraints.resize(size);
+}
+
+void
+LinearConstraints::add(const LinearConstraint& linearConstraint) {
+
+	_linearConstraints.push_back(linearConstraint);
+}
+
+void
+LinearConstraints::addAll(const LinearConstraints& linearConstraints) {
+
+	_linearConstraints.insert(_linearConstraints.end(), linearConstraints.begin(), linearConstraints.end());
+}
+
+std::vector<unsigned int>
+LinearConstraints::getConstraints(const std::vector<unsigned int>& variableIds) {
+
+	std::vector<unsigned int> indices;
+
+	for (unsigned int i = 0; i < size(); i++) {
+
+		LinearConstraint& constraint = _linearConstraints[i];
+
+		for (std::vector<unsigned int>::const_iterator v = variableIds.begin(); v != variableIds.end(); v++) {
+
+			if (constraint.getCoefficients().count(*v) != 0) {
+
+				indices.push_back(i);
+				break;
+			}
+		}
+	}
+
+	return indices;
+}
+
+}}} // namespace opengm::learning::solver
+
+#endif // INFERENCE_LINEAR_CONSTRAINTS_H__
+
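
A short, illustrative sketch (made-up values, not part of this commit) of the
getConstraints() helper, which returns the indices of all constraints that
mention at least one of the given variables:

    #include <vector>
    #include <opengm/learning/solver/LinearConstraints.h>

    using namespace opengm::learning::solver;

    void example() {

        LinearConstraints constraints;

        LinearConstraint c0;           // x0 + x1 == 1
        c0.setCoefficient(0, 1.0);
        c0.setCoefficient(1, 1.0);
        c0.setRelation(Equal);
        c0.setValue(1.0);
        constraints.add(c0);

        LinearConstraint c1;           // x2 <= 4
        c1.setCoefficient(2, 1.0);
        c1.setRelation(LessEqual);
        c1.setValue(4.0);
        constraints.add(c1);

        // which constraints use variable 1? -> only c0, i.e. index 0
        std::vector<unsigned int> query(1, 1);
        std::vector<unsigned int> hits = constraints.getConstraints(query);
    }
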
diff --git a/include/opengm/learning/solver/LinearObjective.h b/include/opengm/learning/solver/LinearObjective.h
new file mode 100644
index 0000000..a8f1b9e
--- /dev/null
+++ b/include/opengm/learning/solver/LinearObjective.h
@@ -0,0 +1,24 @@
+#ifndef INFERENCE_LINEAR_OBJECTIVE_H__
+#define INFERENCE_LINEAR_OBJECTIVE_H__
+
+#include "QuadraticObjective.h"
+
+namespace opengm {
+namespace learning {
+namespace solver {
+
+class LinearObjective : public QuadraticObjective {
+
+public:
+
+	LinearObjective(unsigned int size = 0) : QuadraticObjective(size) {}
+
+private:
+
+	using QuadraticObjective::setQuadraticCoefficient;
+};
+
+}}} // namespace opengm::learning::solver
+
+#endif // INFERENCE_LINEAR_OBJECTIVE_H__
+
diff --git a/include/opengm/learning/solver/LinearSolverBackend.h b/include/opengm/learning/solver/LinearSolverBackend.h
new file mode 100644
index 0000000..6ba5b2c
--- /dev/null
+++ b/include/opengm/learning/solver/LinearSolverBackend.h
@@ -0,0 +1,84 @@
+#ifndef INFERENCE_LINEAR_SOLVER_BACKEND_H__
+#define INFERENCE_LINEAR_SOLVER_BACKEND_H__
+
+#include "LinearObjective.h"
+#include "LinearConstraints.h"
+#include "Solution.h"
+#include "VariableType.h"
+
+namespace opengm {
+namespace learning {
+namespace solver {
+
+class LinearSolverBackend {
+
+public:
+
+	virtual ~LinearSolverBackend() {};
+
+	/**
+	 * Initialise the linear solver for the given type of variables.
+	 *
+	 * @param numVariables The number of variables in the problem.
+	 * @param variableType The type of the variables (Continuous, Integer,
+	 *                     Binary).
+	 */
+	virtual void initialize(
+			unsigned int numVariables,
+			VariableType variableType) = 0;
+
+	/**
+	 * Initialise the linear solver for the given type of variables.
+	 *
+	 * @param numVariables
+	 *             The number of variables in the problem.
+	 * 
+	 * @param defaultVariableType
+	 *             The default type of the variables (Continuous, Integer, 
+	 *             Binary).
+	 *
+	 * @param specialVariableTypes
+	 *             A map of variable numbers to variable types to override the 
+	 *             default.
+	 */
+	virtual void initialize(
+			unsigned int                                numVariables,
+			VariableType                                defaultVariableType,
+			const std::map<unsigned int, VariableType>& specialVariableTypes) = 0;
+
+	/**
+	 * Set the objective.
+	 *
+	 * @param objective A linear objective.
+	 */
+	virtual void setObjective(const LinearObjective& objective) = 0;
+
+	/**
+	 * Set the linear (in)equality constraints.
+	 *
+	 * @param constraints A set of linear constraints.
+	 */
+	virtual void setConstraints(const LinearConstraints& constraints) = 0;
+
+	/**
+	 * Add a single linear constraint.
+	 *
+	 * @param constraint The constraint to add.
+	 */
+	virtual void addConstraint(const LinearConstraint& constraint) = 0;
+
+	/**
+	 * Solve the problem.
+	 *
+	 * @param solution A solution object to write the solution to.
+	 * @param value The optimal value of the objective.
+	 * @param message A status message from the solver.
+	 * @return true, if the optimal value was found.
+	 */
+	virtual bool solve(Solution& solution, double& value, std::string& message) = 0;
+};
+
+}}} // namespace opengm::learning::solver
+
+#endif // INFERENCE_LINEAR_SOLVER_BACKEND_H__
+
diff --git a/include/opengm/learning/solver/QuadraticObjective.h b/include/opengm/learning/solver/QuadraticObjective.h
new file mode 100644
index 0000000..5b127a4
--- /dev/null
+++ b/include/opengm/learning/solver/QuadraticObjective.h
@@ -0,0 +1,180 @@
+#ifndef INFERENCE_QUADRATIC_OBJECTIVE_H__
+#define INFERENCE_QUADRATIC_OBJECTIVE_H__
+
+#include <map>
+
+#include "Sense.h"
+
+namespace opengm {
+namespace learning {
+namespace solver {
+
+class QuadraticObjective {
+
+public:
+
+	/**
+	 * Create a new quadratic objective for 'size' variables.
+	 *
+	 * @param size The number of coefficients in the objective.
+	 */
+	QuadraticObjective(unsigned int size = 0);
+
+	/**
+	 * Set the constant value of the expression.
+	 *
+	 * @param constant The value of the constant part of the objective.
+	 */
+	void setConstant(double constant);
+
+	/**
+	 * @return The value of the constant part of the objective.
+	 */
+	double getConstant() const;
+
+	/**
+	 * Add a coefficient.
+	 *
+	 * @param varNum The number of the variable to add the coefficient for.
+	 * @param coef The value of the coefficient.
+	 */
+	void setCoefficient(unsigned int varNum, double coef);
+
+	/**
+	 * Get the linear coefficients of this objective as a vector of
+	 * coefficient values, indexed by variable number.
+	 *
+	 * @return A vector of coefficient values, one entry per variable.
+	 */
+	const std::vector<double>& getCoefficients() const;
+
+	/**
+	 * Add a quadratic coefficient. Use this to fill the Q matrix in the
+	 * objective <a,x> + xQx.
+	 *
+	 * @param varNum1 The row of Q.
+	 * @param varNum2 The column of Q.
+	 * @param coef The value of the coefficient.
+	 */
+	void setQuadraticCoefficient(unsigned int varNum1, unsigned int varNum2, double coef);
+
+	/**
+	 * Get the quadratic coefficients of this objective as a map of pairs of variable
+	 * numbers to coefficient values.
+	 *
+	 * @return A map from pairs of variable numbers to coefficient values.
+	 */
+	const std::map<std::pair<unsigned int, unsigned int>, double>& getQuadraticCoefficients() const;
+
+	/**
+	 * Set the sense of the objective.
+	 *
+	 * @param sense Minimize or Maximize.
+	 */
+	void setSense(Sense sense);
+
+	/**
+	 * Get the sense of this objective.
+	 *
+	 * @return Minimize or Maximize.
+	 */
+	Sense getSense() const;
+
+	/**
+	 * Resize the objective. New coefficients will be set to zero.
+	 *
+	 * @param size The new size of the objective.
+	 */
+	void resize(unsigned int size);
+
+	/**
+	 * Get the number of variables in this objective.
+	 *
+	 * @return The number of variables in this objective.
+	 */
+	unsigned int size() const { return _coefs.size(); }
+
+private:
+
+	Sense _sense;
+
+	double _constant;
+
+	// linear coefficients are assumed to be dense, therefore we use a vector
+	std::vector<double> _coefs;
+
+	std::map<std::pair<unsigned int, unsigned int>, double> _quadraticCoefs;
+};
+
+QuadraticObjective::QuadraticObjective(unsigned int size) :
+	_sense(Minimize),
+	_constant(0) {
+
+	resize(size);
+}
+
+void
+QuadraticObjective::setConstant(double constant) {
+
+	_constant = constant;
+}
+
+double
+QuadraticObjective::getConstant() const {
+
+	return _constant;
+}
+
+void
+QuadraticObjective::setCoefficient(unsigned int varNum, double coef) {
+
+	_coefs[varNum] = coef;
+}
+
+const std::vector<double>&
+QuadraticObjective::getCoefficients() const {
+
+	return _coefs;
+}
+
+void
+QuadraticObjective::setQuadraticCoefficient(unsigned int varNum1, unsigned int varNum2, double coef) {
+
+	if (coef == 0) {
+
+		_quadraticCoefs.erase(std::make_pair(varNum1, varNum2));
+
+	} else {
+
+		_quadraticCoefs[std::make_pair(varNum1, varNum2)] = coef;
+	}
+}
+
+const std::map<std::pair<unsigned int, unsigned int>, double>&
+QuadraticObjective::getQuadraticCoefficients() const {
+
+	return _quadraticCoefs;
+}
+
+void
+QuadraticObjective::setSense(Sense sense) {
+
+	_sense = sense;
+}
+
+Sense
+QuadraticObjective::getSense() const {
+
+	return _sense;
+}
+
+void
+QuadraticObjective::resize(unsigned int size) {
+
+	_coefs.resize(size, 0.0);
+}
+
+}}} // namespace opengm::learning::solver
+
+#endif // INFERENCE_QUADRATIC_OBJECTIVE_H__
+
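
Each entry ((i, j), v) of the quadratic-coefficient map contributes the term
v*x_i*x_j when the objective is handed to the Gurobi backend above, so a
symmetric off-diagonal entry of Q can be entered either as two ordered pairs
or as a single pair carrying twice the value. An illustrative sketch (not part
of this commit):

    #include <vector>
    #include <opengm/learning/solver/QuadraticObjective.h>

    using namespace opengm::learning::solver;

    void example() {

        // objective: x0^2 + 4*x0*x1 + 3*x1^2 - x0
        QuadraticObjective objective(2);
        objective.setCoefficient(0, -1.0);               // linear part a
        objective.setQuadraticCoefficient(0, 0, 1.0);    // diagonal of Q
        objective.setQuadraticCoefficient(1, 1, 3.0);
        objective.setQuadraticCoefficient(0, 1, 2.0);    // off-diagonal, entered
        objective.setQuadraticCoefficient(1, 0, 2.0);    // twice: 2 + 2 = 4
    }
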
diff --git a/include/opengm/learning/solver/QuadraticSolverBackend.h b/include/opengm/learning/solver/QuadraticSolverBackend.h
new file mode 100644
index 0000000..cc3a160
--- /dev/null
+++ b/include/opengm/learning/solver/QuadraticSolverBackend.h
@@ -0,0 +1,28 @@
+#ifndef INFERENCE_QUADRATIC_SOLVER_BACKEND_H__
+#define INFERENCE_QUADRATIC_SOLVER_BACKEND_H__
+
+#include "QuadraticObjective.h"
+#include "LinearSolverBackend.h"
+
+namespace opengm {
+namespace learning {
+namespace solver {
+
+class QuadraticSolverBackend : public LinearSolverBackend {
+
+public:
+
+	virtual ~QuadraticSolverBackend() {};
+
+	/**
+	 * Set the objective.
+	 *
+	 * @param objective A quadratic objective.
+	 */
+	virtual void setObjective(const QuadraticObjective& objective) = 0;
+};
+
+}}} // namespace opengm::learning::solver
+
+#endif // INFERENCE_QUADRATIC_SOLVER_BACKEND_H__
+
diff --git a/include/opengm/learning/solver/QuadraticSolverFactory.h b/include/opengm/learning/solver/QuadraticSolverFactory.h
new file mode 100644
index 0000000..1476907
--- /dev/null
+++ b/include/opengm/learning/solver/QuadraticSolverFactory.h
@@ -0,0 +1,29 @@
+#ifndef OPENGM_LEARNING_SOLVER_QUADRATIC_SOLVER_FACTORY_H__
+#define OPENGM_LEARNING_SOLVER_QUADRATIC_SOLVER_FACTORY_H__
+
+#ifdef WITH_GUROBI
+#include "GurobiBackend.h"
+#endif
+
+namespace opengm {
+namespace learning {
+namespace solver {
+
+class QuadraticSolverFactory {
+
+public:
+
+	static QuadraticSolverBackend* Create() {
+
+#ifdef WITH_GUROBI
+		return new GurobiBackend();
+#endif
+
+		throw opengm::RuntimeError("No quadratic solver available.");
+	}
+};
+
+}}} // namespace opengm::learning::solver
+
+#endif // OPENGM_LEARNING_SOLVER_QUADRATIC_SOLVER_FACTORY_H__
+
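
This mirrors how the bundle optimizer above holds its solver: Create() hands
back a heap-allocated backend that the caller owns. A minimal sketch, assuming
WITH_GUROBI is defined so that Create() returns a GurobiBackend instead of
throwing:

    #include <opengm/learning/solver/QuadraticSolverFactory.h>

    using namespace opengm::learning::solver;

    void example() {

        QuadraticSolverBackend* solver = QuadraticSolverFactory::Create();

        solver->initialize(10, Continuous);
        // ... setObjective(...), setConstraints(...), solve(...) as shown above ...

        delete solver;
    }
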
diff --git a/include/opengm/learning/solver/QuadraticSolverParameters.h b/include/opengm/learning/solver/QuadraticSolverParameters.h
new file mode 100644
index 0000000..42486e8
--- /dev/null
+++ b/include/opengm/learning/solver/QuadraticSolverParameters.h
@@ -0,0 +1,15 @@
+#ifndef INFERENCE_QUADRATIC_SOLVER_PARAMETERS_H__
+#define INFERENCE_QUADRATIC_SOLVER_PARAMETERS_H__
+
+#include "LinearSolverParameters.h"
+
+namespace opengm {
+namespace learning {
+namespace solver {
+
+class QuadraticSolverParameters : public LinearSolverParameters {};
+
+}}} // namespace opengm::learning::solver
+
+#endif // INFERENCE_QUADRATIC_SOLVER_PARAMETERS_H__
+
diff --git a/include/opengm/learning/solver/Relation.h b/include/opengm/learning/solver/Relation.h
new file mode 100644
index 0000000..7364591
--- /dev/null
+++ b/include/opengm/learning/solver/Relation.h
@@ -0,0 +1,20 @@
+#ifndef INFERENCE_RELATION_H__
+#define INFERENCE_RELATION_H__
+
+namespace opengm {
+namespace learning {
+namespace solver {
+
+/** Used to indicate the relation of a linear constraint.
+ */
+enum Relation {
+
+	LessEqual,
+	Equal,
+	GreaterEqual
+};
+
+}}} // namespace opengm::learning::solver
+
+#endif // INFERENCE_RELATION_H__
+
diff --git a/include/opengm/learning/solver/Sense.h b/include/opengm/learning/solver/Sense.h
new file mode 100644
index 0000000..3f50c3a
--- /dev/null
+++ b/include/opengm/learning/solver/Sense.h
@@ -0,0 +1,20 @@
+#ifndef SENSE_H__
+#define SENSE_H__
+
+namespace opengm {
+namespace learning {
+namespace solver {
+
+/** Used to indicate whether an objective is supposed to be minimized or
+ * maximized.
+ */
+enum Sense {
+
+	Minimize,
+	Maximize
+};
+
+}}} // namespace opengm::learning::solver
+
+#endif // SENSE_H__
+
diff --git a/include/opengm/learning/solver/Solution.h b/include/opengm/learning/solver/Solution.h
new file mode 100644
index 0000000..b2a2f72
--- /dev/null
+++ b/include/opengm/learning/solver/Solution.h
@@ -0,0 +1,49 @@
+#ifndef INFERENCE_SOLUTION_H__
+#define INFERENCE_SOLUTION_H__
+
+namespace opengm {
+namespace learning {
+namespace solver {
+
+class Solution {
+
+public:
+
+	Solution(unsigned int size = 0);
+
+	void resize(unsigned int size);
+
+	unsigned int size() const { return _solution.size(); }
+
+	const double& operator[](unsigned int i) const { return _solution[i]; }
+
+	double& operator[](unsigned int i) { return _solution[i]; }
+
+	std::vector<double>& getVector() { return _solution; }
+
+	void setValue(double value) { _value = value; }
+
+	double getValue() { return _value; }
+
+private:
+
+	std::vector<double> _solution;
+
+	double _value;
+};
+
+Solution::Solution(unsigned int size) {
+
+	resize(size);
+}
+
+void
+Solution::resize(unsigned int size) {
+
+	_solution.resize(size);
+}
+
+}}} // namespace opengm::learning::solver
+
+#endif // INFERENCE_SOLUTION_H__
+
diff --git a/include/opengm/learning/solver/VariableType.h b/include/opengm/learning/solver/VariableType.h
new file mode 100644
index 0000000..d107a41
--- /dev/null
+++ b/include/opengm/learning/solver/VariableType.h
@@ -0,0 +1,18 @@
+#ifndef INFERENCE_VARIABLE_TYPE_H__
+#define INFERENCE_VARIABLE_TYPE_H__
+
+namespace opengm {
+namespace learning {
+namespace solver {
+
+enum VariableType {
+
+	Continuous,
+	Integer,
+	Binary
+};
+
+}}} // namespace opengm::learning::solver
+
+#endif // INFERENCE_VARIABLE_TYPE_H__
+
diff --git a/src/unittest/CMakeLists.txt b/src/unittest/CMakeLists.txt
index 717f505..efa4b5d 100644
--- a/src/unittest/CMakeLists.txt
+++ b/src/unittest/CMakeLists.txt
@@ -82,8 +82,11 @@ if(BUILD_TESTING)
       add_test(test-io-hdf5 ${CMAKE_CURRENT_BINARY_DIR}/test-io-hdf5)
    endif()
 
-   ADD_EXECUTABLE(test-learning test_learning.cxx ${headers})
-   add_test(test-learning ${CMAKE_CURRENT_BINARY_DIR}/test-learning) 
+   if (WITH_GUROBI)
+      ADD_EXECUTABLE(test-learning test_learning.cxx ${headers})
+      target_link_libraries(test-learning gurobi_c++ ${GUROBI_LIBRARY})
+      add_test(test-learning ${CMAKE_CURRENT_BINARY_DIR}/test-learning)
+   endif()
 
    add_subdirectory(inference)
 endif()

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git


