From 715d4a0f4a55f36a28d11e0655cd310dabd6aa75 Mon Sep 17 00:00:00 2001 From: Pavel Emelyanov Date: Thu, 13 Jun 2024 15:23:30 +0300 Subject: [PATCH] fair_queue: Drop per-dispatch-loop threshold The value is used to limit one shard in the number of requests it's allowed to dispatch in one poll. This is to prevent it from consuming the whole capacity in one go and let other shards get their portion. Group-wide balancing (previous patch) made this fuse obsolete. Signed-off-by: Pavel Emelyanov --- apps/io_tester/ioinfo.cc | 1 - include/seastar/core/fair_queue.hh | 2 -- src/core/fair_queue.cc | 5 +---- 3 files changed, 1 insertion(+), 7 deletions(-) diff --git a/apps/io_tester/ioinfo.cc b/apps/io_tester/ioinfo.cc index d23f265e05b..d2393e6ec77 100644 --- a/apps/io_tester/ioinfo.cc +++ b/apps/io_tester/ioinfo.cc @@ -75,7 +75,6 @@ int main(int ac, char** av) { out << YAML::EndMap; const auto& fg = internal::get_fair_group(ioq, internal::io_direction_and_length::write_idx); - out << YAML::Key << "per_tick_grab_threshold" << YAML::Value << fg.per_tick_grab_threshold(); const auto& tb = fg.token_bucket(); out << YAML::Key << "token_bucket" << YAML::BeginMap; diff --git a/include/seastar/core/fair_queue.hh b/include/seastar/core/fair_queue.hh index 7a198270e45..6be9a475e21 100644 --- a/include/seastar/core/fair_queue.hh +++ b/include/seastar/core/fair_queue.hh @@ -217,7 +217,6 @@ private: */ token_bucket_t _token_bucket; - const capacity_t _per_tick_threshold; // Capacities accumulated by queues in this group. 
Each queue tries not // to run too far ahead of the others, if it does -- it skips dispatch @@ -265,7 +264,6 @@ public: fair_group(fair_group&&) = delete; capacity_t maximum_capacity() const noexcept { return _token_bucket.limit(); } - capacity_t per_tick_grab_threshold() const noexcept { return _per_tick_threshold; } capacity_t grab_capacity(capacity_t cap) noexcept; clock_type::time_point replenished_ts() const noexcept { return _token_bucket.replenished_ts(); } void replenish_capacity(clock_type::time_point now) noexcept; diff --git a/src/core/fair_queue.cc b/src/core/fair_queue.cc index f67a72f209d..b97f06adda5 100644 --- a/src/core/fair_queue.cc +++ b/src/core/fair_queue.cc @@ -109,7 +109,6 @@ fair_group::fair_group(config cfg, unsigned nr_queues) std::max(fixed_point_factor * token_bucket_t::rate_cast(cfg.rate_limit_duration).count(), tokens_capacity(cfg.limit_min_tokens)), tokens_capacity(cfg.min_tokens) ) - , _per_tick_threshold(_token_bucket.limit() / nr_queues) , _balance(smp::count, max_balance) { if (tokens_capacity(cfg.min_tokens) > _token_bucket.threshold()) { @@ -359,7 +358,6 @@ fair_queue::clock_type::time_point fair_queue::next_pending_aio() const noexcept } void fair_queue::dispatch_requests(std::function cb) { - capacity_t dispatched = 0; boost::container::small_vector preempt; capacity_t balance = _group.current_balance(); @@ -367,7 +365,7 @@ void fair_queue::dispatch_requests(std::function cb) { return; } - while (!_handles.empty() && (dispatched < _group.per_tick_grab_threshold())) { + while (!_handles.empty()) { priority_class_data& h = *_handles.top(); if (h._queue.empty() || !h._plugged) { pop_priority_class(h); @@ -412,7 +410,6 @@ void fair_queue::dispatch_requests(std::function cb) { _total_accumulated += req_cost; h._accumulated += req_cost; h._pure_accumulated += req_cap; - dispatched += req_cap; cb(req);