fair_queue: Drop per-dispatch-loop threshold
The value limits the number of requests one shard is allowed to dispatch
in one poll. This prevents it from consuming the whole capacity in one go
and lets other shards get their portion.

Group-wide balancing (previous patch) made this fuse obsolete.

Signed-off-by: Pavel Emelyanov <[email protected]>
xemul committed Jun 13, 2024
1 parent b47a932 commit 207c14a
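
For context, a minimal standalone sketch (not Seastar's actual classes; simplified free functions that borrow the names appearing in the diff below) contrasting the dropped per-tick cap with the group-wide balance check that replaces it:

#include <cstdint>
#include <deque>

using capacity_t = uint64_t;

struct request { capacity_t cost; };

// Old behaviour: besides the token bucket, each shard's dispatch loop was
// additionally capped at roughly token_bucket.limit() / nr_queues per poll.
capacity_t dispatch_with_per_tick_cap(std::deque<request>& q, capacity_t per_tick_threshold) {
    capacity_t dispatched = 0;
    while (!q.empty() && dispatched < per_tick_threshold) {
        dispatched += q.front().cost;
        q.pop_front();
    }
    return dispatched;
}

// New behaviour: the per-poll cap is gone; a shard skips the whole poll
// only when its accumulated cost runs too far ahead of the group balance.
capacity_t dispatch_with_group_balance(std::deque<request>& q, capacity_t total_accumulated,
                                       capacity_t group_balance, capacity_t max_imbalance) {
    if (total_accumulated > group_balance + max_imbalance) {
        return 0; // let the other shards catch up first
    }
    capacity_t dispatched = 0;
    while (!q.empty()) {
        dispatched += q.front().cost;
        q.pop_front();
    }
    return dispatched;
}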
Showing 3 changed files with 1 addition and 7 deletions.
1 change: 0 additions & 1 deletion apps/io_tester/ioinfo.cc
@@ -75,7 +75,6 @@ int main(int ac, char** av) {
 out << YAML::EndMap;

 const auto& fg = internal::get_fair_group(ioq, internal::io_direction_and_length::write_idx);
-out << YAML::Key << "per_tick_grab_threshold" << YAML::Value << fg.per_tick_grab_threshold();

 const auto& tb = fg.token_bucket();
 out << YAML::Key << "token_bucket" << YAML::BeginMap;
2 changes: 0 additions & 2 deletions include/seastar/core/fair_queue.hh
@@ -217,7 +217,6 @@ private:
 */

 token_bucket_t _token_bucket;
-const capacity_t _per_tick_threshold;

 // Capacities accumulated by queues in this group. Each queue tries not
 // to run too far ahead of the others, if it does -- it skips dispatch
@@ -265,7 +264,6 @@
 fair_group(fair_group&&) = delete;

 capacity_t maximum_capacity() const noexcept { return _token_bucket.limit(); }
-capacity_t per_tick_grab_threshold() const noexcept { return _per_tick_threshold; }
 capacity_t grab_capacity(capacity_t cap) noexcept;
 clock_type::time_point replenished_ts() const noexcept { return _token_bucket.replenished_ts(); }
 void replenish_capacity(clock_type::time_point now) noexcept;
5 changes: 1 addition & 4 deletions src/core/fair_queue.cc
@@ -109,7 +109,6 @@ fair_group::fair_group(config cfg, unsigned nr_queues)
 std::max<capacity_t>(fixed_point_factor * token_bucket_t::rate_cast(cfg.rate_limit_duration).count(), tokens_capacity(cfg.limit_min_tokens)),
 tokens_capacity(cfg.min_tokens)
 )
-, _per_tick_threshold(_token_bucket.limit() / nr_queues)
 , _balance(smp::count, max_balance)
 {
 if (tokens_capacity(cfg.min_tokens) > _token_bucket.threshold()) {
@@ -360,15 +359,14 @@ fair_queue::clock_type::time_point fair_queue::next_pending_aio() const noexcept
 }

 void fair_queue::dispatch_requests(std::function<void(fair_queue_entry&)> cb) {
-capacity_t dispatched = 0;
 boost::container::small_vector<priority_class_ptr, 2> preempt;

 capacity_t balance = _group.current_balance();
 if (_total_accumulated > balance + _max_imbalance) {
 return;
 }

-while (!_handles.empty() && (dispatched < _group.per_tick_grab_threshold())) {
+while (!_handles.empty()) {
 priority_class_data& h = *_handles.top();
 if (h._queue.empty() || !h._plugged) {
 pop_priority_class(h);
@@ -413,7 +411,6 @@ void fair_queue::dispatch_requests(std::function<void(fair_queue_entry&)> cb) {
 _total_accumulated += req_cost;
 h._accumulated += req_cost;
 h._pure_accumulated += req_cap;
-dispatched += req_cap;

 cb(req);