Add Taskflow #70

Open · wants to merge 9 commits into base: main
Changes from 7 commits
3 changes: 3 additions & 0 deletions .gitmodules
@@ -2,3 +2,6 @@
path = testscpp/Catch2
url = https://github.com/catchorg/Catch2.git
branch = v2.x
[submodule "extern/taskflow"]
path = extern/taskflow
url = https://github.com/taskflow/taskflow.git
241 changes: 186 additions & 55 deletions dwave/preprocessing/include/dwave/presolve.hpp
@@ -15,11 +15,14 @@
#pragma once

#include <algorithm>
#include <cstdint>
#include <unordered_map>
#include <utility>
#include <vector>

#include "dimod/constrained_quadratic_model.h"
#include "taskflow/core/taskflow.hpp"
#include "taskflow/taskflow.hpp"

namespace dwave {
namespace presolve {
@@ -114,6 +117,23 @@ void Postsolver<bias_type, index_type, assignment_type>::substitute_variable(ind
transforms_.back().offset = offset;
}

class PresolverTechniques {
public:
const static uint64_t SPIN_TO_BINARY = 1 << 0;
const static uint64_t REMOVE_OFFSETS = 1 << 1;
const static uint64_t FLIP_CONSTRAINTS = 1 << 2;
const static uint64_t REMOVE_SELF_LOOPS = 1 << 3;
const static uint64_t REMOVE_INVALID_MARKERS = 1 << 4;
const static uint64_t CHECK_FOR_NAN = 1 << 5;
const static uint64_t REMOVE_SINGLE_VAR_CONSTRAINTS = 1 << 6;
const static uint64_t REMOVE_ZERO_BIASES = 1 << 7;
const static uint64_t TIGHTEN_BOUNDS = 1 << 8;
const static uint64_t REMOVE_FIXED_VARS = 1 << 9;

const static uint64_t ALL = ~uint64_t(0);
const static uint64_t NONE = 0;
};
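// Illustrative usage (hypothetical caller code, not part of this header): the
// technique flags are plain bit masks and can be combined with bitwise OR, e.g.
//
//     Presolver<double, int, double> presolver(std::move(cqm));
//     presolver.load_presolvers(PresolverTechniques::REMOVE_OFFSETS |
//                               PresolverTechniques::REMOVE_ZERO_BIASES);
//     presolver.apply();
//
// where cqm is an existing dimod constrained quadratic model.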

template <class Bias, class Index, class Assignment>
class Presolver {
public:
@@ -139,7 +159,10 @@ class Presolver {
model_type detach_model();

/// Load the default presolve techniques.
void load_default_presolvers();
void load_default_presolvers(uint32_t max_rounds = 100) {
load_presolvers(PresolverTechniques::ALL, max_rounds);
}
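/// Load the presolve techniques selected by the presolvers bitmask of
/// PresolverTechniques flags; max_rounds caps the number of passes over the
/// iterated ("trivial") techniques.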
void load_presolvers(uint64_t presolvers = PresolverTechniques::ALL, uint32_t max_rounds = 100);

/// Return a const reference to the held constrained quadratic model.
const model_type& model() const;
@@ -148,11 +171,28 @@
const Postsolver<bias_type, index_type, assignment_type>& postsolver() const;

private:
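// Bundles the Taskflow executor, the three task graphs built by the
// load_taskflow_* methods (one-time, iterated "trivial", and cleanup passes),
// and the loop/feasibility state shared between the technique tasks.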
struct TfHelper {
tf::Executor executor;
tf::Taskflow taskflow_onetime;
tf::Taskflow taskflow_trivial;
tf::Taskflow taskflow_cleanup;
uint32_t loop_counter = 0;
bool loop_changed = false;
bool model_feasible = true;

// tf::Executor and tf::Taskflow are not assignable, so assignment of the
// helper is a deliberate no-op that keeps the enclosing Presolver assignable.
TfHelper& operator=(const TfHelper&) { return *this; }
};

struct TfHelper tf_helper_;

void load_taskflow_onetime();
void load_taskflow_trivial(uint32_t max_rounds = 100);
void load_taskflow_cleanup();

model_type model_;
Postsolver<bias_type, index_type, assignment_type> postsolver_;

// todo: replace this with a vector of pointers or similar
bool default_techniques_;
uint64_t presolvers_ = PresolverTechniques::NONE;

bool detached_;

@@ -270,6 +310,10 @@ class Presolver {
return false;
}
bool technique_remove_single_variable_constraints() {
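// Infeasibility is recorded rather than thrown here so the Taskflow tasks can
// run to completion; apply() raises the Python-visible "infeasible" error
// once the flows have finished.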
if (!tf_helper_.model_feasible) {
return false;
}

bool ret = false;
size_type c = 0;
while (c < model_.num_constraints()) {
@@ -281,20 +325,17 @@
switch (constraint.sense()) {
case dimod::Sense::EQ:
if (constraint.offset() != constraint.rhs()) {
// need this exact message for Python
throw std::logic_error("infeasible");
tf_helper_.model_feasible = false;
}
break;
case dimod::Sense::LE:
if (constraint.offset() > constraint.rhs()) {
// need this exact message for Python
throw std::logic_error("infeasible");
tf_helper_.model_feasible = false;
}
break;
case dimod::Sense::GE:
if (constraint.offset() < constraint.rhs()) {
// need this exact message for Python
throw std::logic_error("infeasible");
tf_helper_.model_feasible = false;
}
break;
}
@@ -340,16 +381,22 @@
return ret;
}
bool technique_remove_zero_biases() {
bool ret = false;
if (!tf_helper_.model_feasible) {
return false;
}

bool ret = false;
ret |= remove_zero_biases(model_.objective);
for (size_t c = 0; c < model_.num_constraints(); ++c) {
ret |= remove_zero_biases(model_.constraint_ref(c));
}

return ret;
}
bool technique_tighten_bounds() {
if (!tf_helper_.model_feasible) {
return false;
}

bool ret = false;
bias_type lb;
bias_type ub;
@@ -376,7 +423,11 @@
return ret;
}
bool technique_remove_fixed_variables() {
bool ret = false;
if (!tf_helper_.model_feasible) {
return false;
}

bool ret = false;
size_type v = 0;
while (v < model_.num_variables()) {
if (model_.lower_bound(v) == model_.upper_bound(v)) {
@@ -418,54 +469,25 @@

template <class bias_type, class index_type, class assignment_type>
Presolver<bias_type, index_type, assignment_type>::Presolver()
: model_(), postsolver_(), default_techniques_(false), detached_(false) {}
: model_(), postsolver_(), detached_(false) {}

template <class bias_type, class index_type, class assignment_type>
Presolver<bias_type, index_type, assignment_type>::Presolver(model_type model)
: model_(std::move(model)), postsolver_(), default_techniques_(), detached_(false) {}
: model_(std::move(model)), postsolver_(), detached_(false) {}

template <class bias_type, class index_type, class assignment_type>
void Presolver<bias_type, index_type, assignment_type>::apply() {
if (detached_) throw std::logic_error("model has been detached, presolver is no longer valid");

// If no techniques have been loaded, return early.
if (!default_techniques_) return;

// One time techniques ----------------------------------------------------

// *-- spin-to-binary
technique_spin_to_binary();
// *-- remove offsets
technique_remove_offsets();
// *-- flip >= constraints
technique_flip_constraints();
// *-- remove self-loops
technique_remove_self_loops();

// Trivial techniques -----------------------------------------------------

bool changes = true;
const index_type max_num_rounds = 100; // todo: make configurable
for (index_type num_rounds = 0; num_rounds < max_num_rounds; ++num_rounds) {
if (!changes) break;
changes = false;

// *-- clear out 0 variables/interactions in the constraints and objective
changes |= technique_remove_zero_biases();
// *-- todo: check for NAN
changes |= technique_check_for_nan();
// *-- remove single variable constraints
changes |= technique_remove_single_variable_constraints();
// *-- tighten bounds based on vartype
changes |= technique_tighten_bounds();
// *-- remove variables that are fixed by bounds
changes |= technique_remove_fixed_variables();
}

// Cleanup

// *-- remove any invalid discrete markers
technique_remove_invalid_markers();
if (presolvers_ == PresolverTechniques::NONE) return;

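// Run the three task graphs in order; the trivial graph loops internally
// until no technique reports a change or max_rounds is exhausted.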
tf_helper_.executor.run(tf_helper_.taskflow_onetime).wait();
tf_helper_.executor.run(tf_helper_.taskflow_trivial).wait();
if (tf_helper_.model_feasible) {
tf_helper_.executor.run(tf_helper_.taskflow_cleanup).wait();
} else {
// need this exact message for Python
throw std::logic_error("infeasible");
}
}

template <class bias_type, class index_type, class assignment_type>
@@ -482,8 +504,117 @@ Presolver<bias_type, index_type, assignment_type>::detach_model() {
}

template <class bias_type, class index_type, class assignment_type>
void Presolver<bias_type, index_type, assignment_type>::load_default_presolvers() {
default_techniques_ = true;
void Presolver<bias_type, index_type, assignment_type>::load_presolvers(uint64_t presolvers,
uint32_t max_rounds) {
presolvers_ = presolvers;

load_taskflow_onetime();
load_taskflow_trivial(max_rounds);
load_taskflow_cleanup();
}

template <class bias_type, class index_type, class assignment_type>
void Presolver<bias_type, index_type, assignment_type>::load_taskflow_onetime() {
std::vector<tf::Task> tasks;

if (presolvers_ & PresolverTechniques::SPIN_TO_BINARY) {
auto task = tf_helper_.taskflow_onetime.emplace([&]() { technique_spin_to_binary(); });
task.name("spin_to_binary");
tasks.emplace_back(task);
}
if (presolvers_ & PresolverTechniques::REMOVE_OFFSETS) {
auto task = tf_helper_.taskflow_onetime.emplace([&]() { technique_remove_offsets(); });
task.name("remove_offsets");
tasks.emplace_back(task);
}
if (presolvers_ & PresolverTechniques::FLIP_CONSTRAINTS) {
auto task = tf_helper_.taskflow_onetime.emplace([&]() { technique_flip_constraints(); });
task.name("flip_constraints");
tasks.emplace_back(task);
}
if (presolvers_ & PresolverTechniques::REMOVE_SELF_LOOPS) {
auto task = tf_helper_.taskflow_onetime.emplace([&]() { technique_remove_self_loops(); });
task.name("remove_self_loops");
tasks.emplace_back(task);
}

// Chain the selected one-time techniques in sequence; guard against an empty
// task list, where size() - 1 would underflow.
for (size_t i = 0; i + 1 < tasks.size(); i++) {
tasks[i].precede(tasks[i + 1]);
}
}

template <class bias_type, class index_type, class assignment_type>
void Presolver<bias_type, index_type, assignment_type>::load_taskflow_trivial(uint32_t max_rounds) {
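// The iterated techniques are wired as a Taskflow do-while loop: alpha resets
// the loop state, the technique tasks run in sequence, and omega is a
// condition task whose integer return value selects its successor (successor
// 0 loops back to the first technique; returning 1, which has no successor,
// ends the flow).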
auto alpha = tf_helper_.taskflow_trivial.emplace([&]() {
tf_helper_.loop_changed = false;
tf_helper_.loop_counter = 0;
});
alpha.name("initialize");

auto omega = tf_helper_.taskflow_trivial.emplace([&]() {
if (tf_helper_.model_feasible && tf_helper_.loop_changed &&
++tf_helper_.loop_counter < max_rounds) {
tf_helper_.loop_changed = false;
return 0; // This will loop back to the first task in tasks, below
}
return 1; // This will exit the loop
});
omega.name("conditional");

std::vector<tf::Task> tasks;
if (presolvers_ & PresolverTechniques::REMOVE_ZERO_BIASES) {
auto task = tf_helper_.taskflow_trivial.emplace(
[&]() { tf_helper_.loop_changed |= technique_remove_zero_biases(); });
task.name("remove_zero_biases");
tasks.emplace_back(task);
}
if (presolvers_ & PresolverTechniques::CHECK_FOR_NAN) {
auto task = tf_helper_.taskflow_trivial.emplace(
[&]() { tf_helper_.loop_changed |= technique_check_for_nan(); });
task.name("check_for_nan");
tasks.emplace_back(task);
}
if (presolvers_ & PresolverTechniques::REMOVE_SINGLE_VAR_CONSTRAINTS) {
auto task = tf_helper_.taskflow_trivial.emplace([&]() {
tf_helper_.loop_changed |= technique_remove_single_variable_constraints();
});
task.name("remove_single_variable_constraints");
tasks.emplace_back(task);
}
if (presolvers_ & PresolverTechniques::TIGHTEN_BOUNDS) {
auto task = tf_helper_.taskflow_trivial.emplace(
[&]() { tf_helper_.loop_changed |= technique_tighten_bounds(); });
task.name("tighten_bounds");
tasks.emplace_back(task);
}
if (presolvers_ & PresolverTechniques::REMOVE_FIXED_VARS) {
auto task = tf_helper_.taskflow_trivial.emplace(
[&]() { tf_helper_.loop_changed |= technique_remove_fixed_variables(); });
task.name("remove_fixed_variables");
tasks.emplace_back(task);
}

if (!tasks.empty()) {
alpha.precede(tasks[0]);

for (size_t i = 0; i + 1 < tasks.size(); i++) {
tasks[i].precede(tasks[i + 1]);
}

tasks.back().precede(omega);
// tasks[0] is successor 0 of the condition task, so returning 0 from omega
// re-enters the loop at the first technique.
omega.precede(tasks[0]);
} else {
alpha.precede(omega);
}
}

template <class bias_type, class index_type, class assignment_type>
void Presolver<bias_type, index_type, assignment_type>::load_taskflow_cleanup() {
if (presolvers_ & PresolverTechniques::REMOVE_INVALID_MARKERS) {
auto task =
tf_helper_.taskflow_cleanup.emplace([&]() { technique_remove_invalid_markers(); });
task.name("remove_invalid_markers");
}
}

template <class bias_type, class index_type, class assignment_type>
1 change: 1 addition & 0 deletions extern/taskflow
Submodule taskflow added at 6633a0
2 changes: 2 additions & 0 deletions setup.py
@@ -26,6 +26,7 @@
'msvc': ['/std:c++17', '/EHsc'],
'unix': [
'-std=c++17',
'-pthread'
],
}

@@ -62,6 +63,7 @@ def build_extensions(self):
include_dirs=[
numpy.get_include(),
dimod.get_include(),
'extern/taskflow'
],
install_requires=[
'numpy>=1.20.0,<2.0.0', # keep synced with circle-ci, pyproject.toml