treewide: replace boost::irange with std::views::iota where possible

This change is created in the same spirit as f322e76.

It partially modernizes our range usage while maintaining existing functionality.

Signed-off-by: Kefu Chai <[email protected]>
tchaikov committed Oct 5, 2024
1 parent a30ec0c commit f1c5606
Showing 25 changed files with 76 additions and 76 deletions.
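
For readers unfamiliar with the two APIs, the pattern applied throughout the diff is a direct swap: Boost's half-open integer range is replaced by the equivalent C++20 standard view. A minimal sketch of the equivalence (the identifiers are illustrative, not taken from the diff):

```cpp
#include <ranges>
#include <boost/range/irange.hpp>

void example(unsigned parallelism) {
    // Before: Boost's half-open integer range [0, parallelism).
    for (unsigned i : boost::irange(0u, parallelism)) { /* ... */ }

    // After: the C++20 equivalent, also a half-open range [0, parallelism).
    for (unsigned i : std::views::iota(0u, parallelism)) { /* ... */ }
}
```

One practical difference to keep in mind: std::views::iota deduces its element type from the first argument, accepts endpoints of different (comparable) types, and has no step parameter, so the swap is only a drop-in for unit-stride ranges.
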
13 changes: 7 additions & 6 deletions apps/io_tester/io_tester.cc
@@ -36,6 +36,7 @@
#include <seastar/util/later.hh>
#include <chrono>
#include <optional>
#include <ranges>
#include <utility>
#include <unordered_set>
#include <vector>
@@ -108,7 +109,7 @@ future<std::pair<file, uint64_t>> create_and_fill_file(sstring name, uint64_t fs
const uint64_t last_buffer_id = (buffers_count - 1u);
const uint64_t last_write_position = buffer_size * last_buffer_id;

return do_with(boost::irange(UINT64_C(0), buffers_count), [f, buffer_size] (auto& buffers_range) mutable {
return do_with(std::views::iota(UINT64_C(0), buffers_count), [f, buffer_size] (auto& buffers_range) mutable {
return max_concurrent_for_each(buffers_range.begin(), buffers_range.end(), 64, [f, buffer_size] (auto buffer_id) mutable {
auto source_buffer = allocate_and_fill_buffer(buffer_size);
auto write_position = buffer_id * buffer_size;
@@ -338,7 +339,7 @@ class class_data {
}

future<> issue_requests_in_parallel(std::chrono::steady_clock::time_point stop) {
return parallel_for_each(boost::irange(0u, parallelism()), [this, stop] (auto dummy) mutable {
return parallel_for_each(std::views::iota(0u, parallelism()), [this, stop] (auto dummy) mutable {
auto bufptr = allocate_aligned_buffer<char>(this->req_size(), _alignment);
auto buf = bufptr.get();
return do_until([this, stop] { return std::chrono::steady_clock::now() > stop || requests() > limit(); }, [this, buf, stop] () mutable {
@@ -352,7 +353,7 @@

future<> issue_requests_at_rate(std::chrono::steady_clock::time_point stop) {
return do_with(io_intent{}, 0u, [this, stop] (io_intent& intent, unsigned& in_flight) {
return parallel_for_each(boost::irange(0u, parallelism()), [this, stop, &intent, &in_flight] (auto dummy) mutable {
return parallel_for_each(std::views::iota(0u, parallelism()), [this, stop, &intent, &in_flight] (auto dummy) mutable {
auto bufptr = allocate_aligned_buffer<char>(this->req_size(), _alignment);
auto buf = bufptr.get();
auto pause = std::chrono::duration_cast<std::chrono::microseconds>(1s) / rps();
@@ -361,7 +362,7 @@
return do_until([this, stop] { return std::chrono::steady_clock::now() > stop || requests() > limit(); }, [this, buf, stop, pause, &intent, &in_flight] () mutable {
auto start = std::chrono::steady_clock::now();
in_flight++;
return parallel_for_each(boost::irange(0u, batch()), [this, buf, &intent, start, stop] (auto dummy) {
return parallel_for_each(std::views::iota(0u, batch()), [this, buf, &intent, start, stop] (auto dummy) {
return issue_request(buf, &intent, start, stop);
}).then([this, start, pause] {
auto now = std::chrono::steady_clock::now();
@@ -752,7 +753,7 @@ class unlink_class_data : public class_data {
return make_ready_future<>();
}

return max_concurrent_for_each(boost::irange(_file_id_to_remove, files_count()), max_concurrency(), [this] (uint64_t file_id) {
return max_concurrent_for_each(std::views::iota(_file_id_to_remove, files_count()), max_concurrency(), [this] (uint64_t file_id) {
const auto fname = get_filename(file_id);
return remove_file(fname);
});
@@ -779,7 +780,7 @@
future<> do_start_on_directory(sstring path) {
_dir_path = std::move(path);

return max_concurrent_for_each(boost::irange(UINT64_C(0), files_count()), max_concurrency(), [this] (uint64_t file_id) {
return max_concurrent_for_each(std::views::iota(UINT64_C(0), files_count()), max_concurrency(), [this] (uint64_t file_id) {
const auto fname = get_filename(file_id);
const auto fsize = align_up<uint64_t>(_config.file_size / files_count(), extent_size_hint_alignment);
const auto flags = open_flags::rw | open_flags::create;
4 changes: 2 additions & 2 deletions apps/iotune/iotune.cc
@@ -25,11 +25,11 @@
#include <chrono>
#include <random>
#include <memory>
#include <ranges>
#include <vector>
#include <cmath>
#include <sys/vfs.h>
#include <sys/sysmacros.h>
#include <boost/range/irange.hpp>
#include <boost/program_options.hpp>
#include <boost/iterator/counting_iterator.hpp>
#include <fstream>
@@ -468,7 +468,7 @@ class test_file {
}

auto worker = worker_ptr.get();
auto concurrency = boost::irange<unsigned, unsigned>(0, max_os_concurrency, 1);
auto concurrency = std::views::iota(0u, max_os_concurrency);
return parallel_for_each(std::move(concurrency), [worker] (unsigned idx) {
auto bufptr = worker->get_buffer();
auto buf = bufptr.get();
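
Note that the boost::irange overload replaced in the iotune.cc hunk above took an explicit step argument; std::views::iota has no step parameter, so the replacement is equivalent only because the step was 1. A minimal sketch of that equivalence (identifiers are illustrative); a non-unit stride would still need boost::irange or C++23's std::views::stride:

```cpp
#include <boost/range/irange.hpp>
#include <cassert>
#include <ranges>
#include <vector>

void unit_stride_only(unsigned max_os_concurrency) {
    auto old_range = boost::irange<unsigned, unsigned>(0, max_os_concurrency, 1);
    auto new_range = std::views::iota(0u, max_os_concurrency);

    // Both produce 0, 1, ..., max_os_concurrency - 1.
    std::vector<unsigned> a(old_range.begin(), old_range.end());
    std::vector<unsigned> b(new_range.begin(), new_range.end());
    assert(a == b);
}
```
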
6 changes: 3 additions & 3 deletions apps/rpc_tester/rpc_tester.cc
@@ -22,9 +22,9 @@
#include <vector>
#include <chrono>
#include <random>
#include <ranges>
#include <yaml-cpp/yaml.h>
#include <fmt/core.h>
#include <boost/range/irange.hpp>
#pragma GCC diagnostic push
// see https://github.com/boostorg/accumulators/pull/54
#pragma GCC diagnostic ignored "-Wuninitialized"
@@ -429,7 +429,7 @@ class job_rpc : public job {
co.tcp_nodelay = _ccfg.nodelay;
co.isolation_cookie = _cfg.sg_name;
_client = std::make_unique<rpc_protocol::client>(_rpc, co, _caddr);
return parallel_for_each(boost::irange(0u, _cfg.parallelism), [this] (auto dummy) {
return parallel_for_each(std::views::iota(0u, _cfg.parallelism), [this] (auto dummy) {
auto f = make_ready_future<>();
if (_cfg.sleep_time) {
// Do initial small delay to de-synchronize fibers
@@ -513,7 +513,7 @@ class job_cpu : public job {
virtual future<> run() override {
_stop = std::chrono::steady_clock::now() + _cfg.duration;
return with_scheduling_group(_cfg.sg, [this] {
return parallel_for_each(boost::irange(0u, _cfg.parallelism), [this] (auto dummy) {
return parallel_for_each(std::views::iota(0u, _cfg.parallelism), [this] (auto dummy) {
return do_until([this] {
return std::chrono::steady_clock::now() > _stop;
}, [this] {
5 changes: 2 additions & 3 deletions demos/rpc_demo.cc
@@ -19,6 +19,7 @@
* Copyright 2015 Cloudius Systems
*/
#include <cmath>
#include <ranges>
#include <seastar/core/reactor.hh>
#include <seastar/core/app-template.hh>
#include <seastar/rpc/rpc.hh>
@@ -27,8 +28,6 @@
#include <seastar/util/log.hh>
#include <seastar/core/loop.hh>

#include <boost/range/irange.hpp>

using namespace seastar;

struct serializer {
@@ -229,7 +228,7 @@ int main(int ac, char** av) {
(void)sleep(400ms).then([test12] () mutable {
// server is configured for 10MB max, throw 25MB worth of requests at it.
auto now = rpc::rpc_clock_type::now();
return parallel_for_each(boost::irange(0, 25), [test12, now] (int idx) mutable {
return parallel_for_each(std::views::iota(0, 25), [test12, now] (int idx) mutable {
return test12(*client, 100, uninitialized_string(1'000'000)).then([idx, now] {
auto later = rpc::rpc_clock_type::now();
auto delta = std::chrono::duration_cast<std::chrono::milliseconds>(later - now);
6 changes: 3 additions & 3 deletions demos/scheduling_group_demo.cc
@@ -32,7 +32,7 @@
#include <fmt/printf.h>
#include <chrono>
#include <cmath>
#include <boost/range/irange.hpp>
#include <ranges>

using namespace seastar;
using namespace std::chrono_literals;
@@ -78,7 +78,7 @@ future<>
run_compute_intensive_tasks(seastar::scheduling_group sg, done_func done, unsigned concurrency, unsigned& counter, std::function<future<> (unsigned& counter)> task) {
return seastar::async([task = std::move(task), sg, concurrency, done, &counter] () mutable {
while (!done()) {
parallel_for_each(boost::irange(0u, concurrency), [task, sg, &counter] (unsigned i) mutable {
parallel_for_each(std::views::iota(0u, concurrency), [task, sg, &counter] (unsigned i) mutable {
return with_scheduling_group(sg, [task, &counter] {
return task(counter);
});
@@ -92,7 +92,7 @@ future<>
run_compute_intensive_tasks_in_threads(seastar::scheduling_group sg, done_func done, unsigned concurrency, unsigned& counter, std::function<future<> (unsigned& counter)> task) {
auto attr = seastar::thread_attributes();
attr.sched_group = sg;
return parallel_for_each(boost::irange(0u, concurrency), [attr, done, &counter, task] (unsigned i) {
return parallel_for_each(std::views::iota(0u, concurrency), [attr, done, &counter, task] (unsigned i) {
return seastar::async(attr, [done, &counter, task] {
while (!done()) {
task(counter).get();
5 changes: 2 additions & 3 deletions demos/tls_simple_client_demo.cc
@@ -19,6 +19,7 @@
* Copyright 2015 Cloudius Systems
*/
#include <cmath>
#include <ranges>

#include <seastar/core/shared_ptr.hh>
#include <seastar/core/reactor.hh>
@@ -28,8 +29,6 @@
#include <seastar/net/dns.hh>
#include "tls_echo_server.hh"

#include <boost/range/irange.hpp>

using namespace seastar;
namespace bpo = boost::program_options;

@@ -97,7 +96,7 @@ int main(int ac, char** av) {
}
return tls::connect(certs, ia, options).then([=](::connected_socket s) {
auto strms = ::make_lw_shared<streams>(std::move(s));
auto range = boost::irange(size_t(0), i);
auto range = std::views::iota(size_t(0), i);
return do_for_each(range, [=](auto) {
auto f = strms->out.write(*msg);
if (!do_read) {
4 changes: 2 additions & 2 deletions doc/tutorial.md
@@ -1619,7 +1619,7 @@ In the examples we saw earlier, `main()` ran our function `f()` only once, on th
seastar::future<> service_loop();
seastar::future<> f() {
return seastar::parallel_for_each(boost::irange<unsigned>(0, seastar::smp::count),
return seastar::parallel_for_each(std::views::iota(0u, seastar::smp::count),
[] (unsigned c) {
return seastar::smp::submit_to(c, service_loop);
});
@@ -2320,7 +2320,7 @@ Consider the following asynchronous function `loop()`, which loops until some sh
```cpp
seastar::future<long> loop(int parallelism, bool& stop) {
return seastar::do_with(0L, [parallelism, &stop] (long& counter) {
return seastar::parallel_for_each(boost::irange<unsigned>(0, parallelism),
return seastar::parallel_for_each(std::views::iota(0u, parallelism),
[&stop, &counter] (unsigned c) {
return seastar::do_until([&stop] { return stop; }, [&counter] {
++counter;
6 changes: 3 additions & 3 deletions scripts/seastar-json2code.py
@@ -487,8 +487,8 @@ def indent_body(s, level):
static $wrapper end() {
return $wrapper($enum_name::NUM_ITEMS);
}
static boost::integer_range<$wrapper> all_items() {
return boost::irange(begin(), end());
static std::ranges::iota_view<$wrapper, $wrapper> all_items() {
return std::views::iota(begin(), end());
}
$enum_name v;""").substitute(enum_name=enum_name,
wrapper=wrapper,
@@ -537,7 +537,7 @@ def create_h_file(data, hfile_name, api_name, init_method, base_api):
'<seastar/json/json_elements.hh>',
'<seastar/http/json_path.hh>'])

add_include(hfile, ['<iostream>', '<boost/range/irange.hpp>'])
add_include(hfile, ['<iostream>', '<ranges>'])
open_namespace(hfile, "seastar")
open_namespace(hfile, "httpd")
open_namespace(hfile, api_name)
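
The generated all_items() above now returns std::ranges::iota_view<wrapper, wrapper> instead of boost::integer_range<wrapper>, which compiles only if the wrapper type models what std::views::iota requires of its endpoints: roughly a signed difference_type, pre/post increment, and equality comparison. A hypothetical stand-in (not the wrapper this script actually generates) that satisfies those requirements:

```cpp
#include <cstddef>
#include <iterator>
#include <ranges>

enum class color { red, green, blue, NUM_ITEMS };

// Hypothetical wrapper, mirroring the shape of the generated type.
struct color_wrapper {
    using difference_type = std::ptrdiff_t;  // needed for std::weakly_incrementable
    color v{color::red};

    color_wrapper& operator++() {
        v = static_cast<color>(static_cast<int>(v) + 1);
        return *this;
    }
    color_wrapper operator++(int) { auto copy = *this; ++*this; return copy; }
    bool operator==(const color_wrapper&) const = default;

    static color_wrapper begin() { return {color::red}; }
    static color_wrapper end() { return {color::NUM_ITEMS}; }
    static std::ranges::iota_view<color_wrapper, color_wrapper> all_items() {
        return std::views::iota(begin(), end());
    }
};

static_assert(std::weakly_incrementable<color_wrapper>);
```
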
4 changes: 2 additions & 2 deletions src/core/prometheus.cc
@@ -33,9 +33,9 @@
#include <boost/algorithm/string.hpp>
#include <boost/range/algorithm.hpp>
#include <boost/range/combine.hpp>
#include <boost/range/irange.hpp>
#include <seastar/core/thread.hh>
#include <seastar/core/loop.hh>
#include <ranges>
#include <regex>
#include <string_view>

@@ -400,7 +400,7 @@ class metrics_families_per_shard {

static future<> get_map_value(metrics_families_per_shard& vec) {
vec.resize(smp::count);
return parallel_for_each(boost::irange(0u, smp::count), [&vec] (auto cpu) {
return parallel_for_each(std::views::iota(0u, smp::count), [&vec] (auto cpu) {
return smp::submit_to(cpu, [] {
return mi::get_values();
}).then([&vec, cpu] (auto res) {
4 changes: 2 additions & 2 deletions src/core/reactor.cc
@@ -31,6 +31,7 @@ module;
#include <exception>
#include <filesystem>
#include <fstream>
#include <ranges>
#include <regex>
#include <thread>

@@ -62,7 +63,6 @@ module;
#include <boost/intrusive/list.hpp>
#include <boost/range/adaptor/transformed.hpp>
#include <boost/range/adaptor/map.hpp>
#include <boost/range/irange.hpp>
#include <boost/range/numeric.hpp>
#include <boost/range/algorithm/sort.hpp>
#include <boost/range/algorithm/remove_if.hpp>
@@ -1078,7 +1078,7 @@ reactor::~reactor() {
// The following line will preserve the convention that constructor and destructor functions
// for the per sg values are called in the context of the containing scheduling group.
*internal::current_scheduling_group_ptr() = scheduling_group(tq->_id);
for (size_t key : boost::irange<size_t>(0, sg_data.scheduling_group_key_configs.size())) {
for (size_t key : std::views::iota(0u, sg_data.scheduling_group_key_configs.size())) {
void* val = this_sg.specific_vals[key];
if (val) {
if (sg_data.scheduling_group_key_configs[key].destructor) {
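
A small subtlety in the reactor.cc hunk above: std::views::iota deduces its element type from the first argument, so iota(0u, ...size()) yields unsigned int values compared against a size_t bound, and the loop variable converts them back to size_t, which is fine as long as the count fits in an unsigned int. A sketch of both spellings (the container name is illustrative), where passing a size_t first argument keeps a single type throughout, closer to the old boost::irange<size_t>:

```cpp
#include <cstddef>
#include <ranges>
#include <vector>

void walk(const std::vector<int>& configs) {
    // As in the diff: element type deduced from 0u, i.e. unsigned int.
    for (size_t key : std::views::iota(0u, configs.size())) {
        (void)key;
    }

    // Same iteration with size_t end to end.
    for (size_t key : std::views::iota(size_t{0}, configs.size())) {
        (void)key;
    }
}
```
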
4 changes: 2 additions & 2 deletions src/core/resource.cc
@@ -28,7 +28,7 @@ module;
#include <boost/algorithm/string.hpp>
#include <boost/range/adaptor/map.hpp>
#include <boost/range/algorithm/copy.hpp>
#include <boost/range/irange.hpp>
#include <ranges>
#include <regex>
#include <stdlib.h>
#include <unistd.h>
@@ -401,7 +401,7 @@ allocate_io_queues(hwloc_topology_t topology, std::vector<cpu> cpus, std::unorde
// above, hwloc won't do us any good here. Later on, we will use this information to assign
// shards to coordinators that are node-local to themselves.
std::unordered_map<unsigned, std::set<unsigned>> numa_nodes;
for (auto shard: boost::irange(0, int(cpus.size()))) {
for (auto shard: std::views::iota(0, int(cpus.size()))) {
auto node_id = node_of_shard(shard);

if (numa_nodes.count(node_id) == 0) {
4 changes: 2 additions & 2 deletions src/core/sharded.cc
@@ -23,7 +23,7 @@
module;
#endif

#include <boost/range/irange.hpp>
#include <ranges>

#ifdef SEASTAR_MODULE
module seastar;
@@ -39,7 +39,7 @@ namespace internal {

future<>
sharded_parallel_for_each(unsigned nr_shards, on_each_shard_func on_each_shard) noexcept(std::is_nothrow_move_constructible_v<on_each_shard_func>) {
return parallel_for_each(boost::irange<unsigned>(0, nr_shards), std::move(on_each_shard));
return parallel_for_each(std::views::iota(0u, nr_shards), std::move(on_each_shard));
}

}
4 changes: 2 additions & 2 deletions tests/perf/fair_queue_perf.cc
@@ -27,7 +27,7 @@
#include <seastar/core/semaphore.hh>
#include <seastar/core/loop.hh>
#include <seastar/core/when_all.hh>
#include <boost/range/irange.hpp>
#include <ranges>

static constexpr fair_queue::class_id cid = 0;

@@ -98,7 +98,7 @@ struct perf_fair_queue {
future<> perf_fair_queue::test(bool loc) {

auto invokers = local_fq.invoke_on_all([loc] (local_fq_and_class& local) {
return parallel_for_each(boost::irange(0u, requests_to_dispatch), [&local, loc] (unsigned dummy) {
return parallel_for_each(std::views::iota(0u, requests_to_dispatch), [&local, loc] (unsigned dummy) {
auto cap = local.queue(loc).tokens_capacity(double(1) / std::numeric_limits<int>::max() + double(1) / std::numeric_limits<int>::max());
auto req = std::make_unique<local_fq_entry>(cap, [&local, loc, cap] {
local.executed++;
6 changes: 3 additions & 3 deletions tests/perf/smp_submit_to_perf.cc
@@ -20,7 +20,7 @@
*/

#include <random>
#include <boost/range/irange.hpp>
#include <ranges>
#include <fmt/core.h>
#include <seastar/core/app-template.hh>
#include <seastar/core/thread.hh>
@@ -55,7 +55,7 @@ class thinker {
future<> _done;

future<> start_thinking(unsigned concurrency) {
return parallel_for_each(boost::irange(0u, concurrency), [this] (unsigned f) {
return parallel_for_each(std::views::iota(0u, concurrency), [this] (unsigned f) {
return do_until([this] { return _stop; }, [this] {
auto until = steady_clock::now() + _pause.get();
while (steady_clock::now() < until) {
@@ -116,7 +116,7 @@ class worker {
}

future<> start_working(unsigned concurrency, respond_type resp, microseconds tmo) {
return parallel_for_each(boost::irange(0u, concurrency), [this, resp, tmo] (unsigned f) {
return parallel_for_each(std::views::iota(0u, concurrency), [this, resp, tmo] (unsigned f) {
return do_until([this] { return _stop; }, [this, resp, tmo] {
return smp::submit_to(_to, [resp, tmo] {
switch (resp) {
6 changes: 3 additions & 3 deletions tests/unit/alien_test.cc
@@ -30,9 +30,9 @@
#include <seastar/core/reactor.hh>
#include <seastar/util/later.hh>
#include <stdexcept>
#include <ranges>
#include <tuple>

#include <boost/range/irange.hpp>

using namespace seastar;

@@ -70,7 +70,7 @@ int main(int argc, char** argv)
});
// test for alien::submit_to(), which returns a std::future<int>
std::vector<std::future<int>> counts;
for (auto i : boost::irange(0u, smp::count)) {
for (auto i : std::views::iota(0u, smp::count)) {
// send messages from alien.
counts.push_back(alien::submit_to(app.alien(), i, [i] {
return seastar::make_ready_future<int>(i);
@@ -122,7 +122,7 @@
std::cerr << "Bad everything: " << everything << " != " << expected << std::endl;
return 1;
}
const auto shards = boost::irange(0u, smp::count);
const auto shards = std::views::iota(0u, smp::count);
auto expected = std::accumulate(std::begin(shards), std::end(shards), 0);
if (total != expected) {
std::cerr << "Bad total: " << total << " != " << expected << std::endl;