Skip to content

Commit

Permalink
Implemented default max-retry of 60s. (#275)
Browse files Browse the repository at this point in the history
* Implemented default max-retry of 60s.

This means that if any REST rate limit occurs that lasts more than 60 seconds,
a hikari.errors.RateLimitTooLongError will be raised instead, which contains
details of how long to back off for, should you wish to retry later.

You can disable this functionality by passing `max_rate_limit=float("inf")`
to the `hikari.impl.bot.BotApp` and `hikari.impl.rest.RESTApp` constructors
if you have a reason to not want this. The assumption is that in 99% of
cases, waiting for sustained periods of time would be confusing to the
user and lead to a poorer user experience overall, which is why this has
been implemented.

I still need to fix the tests.

* Fix documentation and tests (#285)

* Fix linting
  - Reorder `nox` to generate pages last
  - Made isort ignore __init__.pyi files

Co-authored-by: davfsa <[email protected]>
  • Loading branch information
Nekokatt and davfsa authored Oct 8, 2020
1 parent 0e11661 commit ef72110
Show file tree
Hide file tree
Showing 9 changed files with 547 additions and 134 deletions.
1 change: 1 addition & 0 deletions .isort.cfg
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
[settings]
profile = black
force_single_line = true
skip_glob = **/__init__.pyi
428 changes: 343 additions & 85 deletions hikari/api/rest.py

Large diffs are not rendered by default.

64 changes: 50 additions & 14 deletions hikari/errors.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@
"HikariInterrupt",
"NotFoundError",
"RateLimitedError",
"RateLimitTooLongError",
"UnauthorizedError",
"ForbiddenError",
"BadRequestError",
Expand Down Expand Up @@ -414,12 +415,7 @@ class NotFoundError(ClientHTTPResponseError):

@attr.s(auto_exc=True, kw_only=True, slots=True, repr=False, weakref_slot=False)
class RateLimitedError(ClientHTTPResponseError):
"""Raised when a non-global ratelimit that cannot be handled occurs.
This should only ever occur for specific routes that have additional
rate-limits applied to them by Discord. At the time of writing, the
PATCH CHANNEL _endpoint is the only one that knowingly implements this, and
does so by implementing rate-limits on the usage of specific fields only.
"""Raised when a non-global rate limit that cannot be handled occurs.
If you receive one of these, you should NOT try again until the given
time has passed, either discarding the operation you performed, or waiting
Expand All @@ -432,14 +428,6 @@ class RateLimitedError(ClientHTTPResponseError):
this, you may be able to send different requests that manipulate the same
entities (in this case editing the same channel) that do not use the same
collection of attributes as the previous request.
You should not usually see this occur, unless Discord vastly change their
ratelimit system without prior warning, which might happen in the future.
!!! note
If you receive this regularly, please file a bug report, or contact
Discord with the relevant debug information that can be obtained by
enabling debug logs and enabling the debug mode on the HTTP components.
"""

route: routes.CompiledRoute = attr.ib()
Expand All @@ -459,6 +447,54 @@ def _(self) -> str:
return f"You are being rate-limited for {self.retry_after:,} seconds on route {self.route}. Please slow down!"


@attr.s(auto_exc=True, kw_only=True, slots=True, repr=False, weakref_slot=False)
class RateLimitTooLongError(HTTPError):
    """Raised pre-emptively when a rate limit wait would exceed the user-set maximum.

    This plays a similar role to `asyncio.TimeoutError`, except that it is
    raised immediately — before any waiting happens — whenever the time that
    would need to be slept exceeds a user-defined limit.

    This will almost always be route-specific. If you receive this, it is
    unlikely that performing the same call for a different channel/guild/user
    will also have this rate limit.
    """

    route: routes.CompiledRoute = attr.ib()
    """The route that produced this error."""

    retry_after: float = attr.ib()
    """How many seconds to wait before you can retry this specific request."""

    max_retry_after: float = attr.ib()
    """How long the client is allowed to wait for at a maximum before raising."""

    reset_at: float = attr.ib()
    """UNIX timestamp of when this limit will be lifted."""

    limit: int = attr.ib()
    """The maximum number of calls per window for this rate limit."""

    period: float = attr.ib()
    """How long the rate limit window lasts for from start to end."""

    # This property exists purely so the error is self-documenting and to
    # leave room for other kinds of limit in the future; by construction
    # there are never any calls left in the window when this error is raised.
    @property
    def remaining(self) -> typing.Literal[0]:  # noqa: D401 - Imperative mood
        """The number of requests that are remaining in this window.

        This will always be `0` symbolically.

        Returns
        -------
        builtins.int
            The number of requests remaining. Always `0`.
        """
        return 0


@attr.s(auto_exc=True, slots=True, repr=False, weakref_slot=False)
class InternalServerError(HTTPResponseError):
"""Base exception for an erroneous HTTP response that is a server error.
Expand Down
15 changes: 15 additions & 0 deletions hikari/impl/bot.py
Original file line number Diff line number Diff line change
Expand Up @@ -180,6 +180,19 @@ class BotApp(traits.BotAware, event_dispatcher.EventDispatcher):
Note that `"TRACE_HIKARI"` is a library-specific logging level
which is expected to be more verbose than `"DEBUG"`.
max_rate_limit : builtins.float
The max number of seconds to backoff for when rate limited. Anything
greater than this will instead raise an error.
This defaults to one minute if left to the default value. This is to
stop the client from potentially waiting indefinitely on an endpoint, which
is almost never what you want to do if giving a response to a user.
You can set this to `float("inf")` to disable this check entirely.
Note that this only applies to the REST API component that communicates
with Discord, and will not affect sharding or third party HTTP endpoints
that may be in use.
proxy_settings : typing.Optional[config.ProxySettings]
Custom proxy settings to use with network-layer logic
in your application to get through an HTTP-proxy.
Expand Down Expand Up @@ -262,6 +275,7 @@ def __init__(
http_settings: typing.Optional[config.HTTPSettings] = None,
intents: intents_.Intents = intents_.Intents.ALL_UNPRIVILEGED,
logs: typing.Union[None, LoggerLevelT, typing.Dict[str, typing.Any]] = "INFO",
max_rate_limit: float = 60,
proxy_settings: typing.Optional[config.ProxySettings] = None,
rest_url: typing.Optional[str] = None,
) -> None:
Expand Down Expand Up @@ -325,6 +339,7 @@ def __init__(
entity_factory=self._entity_factory,
executor=self._executor,
http_settings=self._http_settings,
max_rate_limit=max_rate_limit,
proxy_settings=self._proxy_settings,
rest_url=rest_url,
token=token,
Expand Down
48 changes: 45 additions & 3 deletions hikari/impl/buckets.py
Original file line number Diff line number Diff line change
Expand Up @@ -209,6 +209,7 @@
import logging
import typing

from hikari import errors
from hikari.impl import rate_limits
from hikari.internal import aio
from hikari.internal import routes
Expand Down Expand Up @@ -260,18 +261,31 @@ def is_unknown(self) -> bool:
"""Return `builtins.True` if the bucket represents an `UNKNOWN` bucket."""
return self.name.startswith(UNKNOWN_HASH)

def acquire(self) -> asyncio.Future[None]:
def acquire(self, max_rate_limit: float = float("inf")) -> asyncio.Future[None]:
    """Acquire time on this rate limiter.

    !!! note
        You should afterwards invoke `RESTBucket.update_rate_limit` to
        update any rate limit information you are made aware of.

    Parameters
    ----------
    max_rate_limit : builtins.float
        The max number of seconds to backoff for when rate limited. Anything
        greater than this will instead raise an error.

        The default is an infinite value, which will thus never time out.

    Returns
    -------
    asyncio.Future[builtins.None]
        A future that should be awaited immediately. Once the future completes,
        you are allowed to proceed with your operation.

        If the reset-after time for the bucket is greater than
        `max_rate_limit`, then this will contain `RateLimitTooLongError`
        as an exception.
    """
    # UNKNOWN buckets have no rate limit information yet, so they are allowed
    # through immediately with an already-completed future.
    # NOTE(review): `max_rate_limit` is not forwarded to `super().acquire()`
    # here — confirm the base rate limiter consumes it (e.g. via an overridden
    # signature elsewhere in this diff); otherwise this parameter is unused
    # and the documented RateLimitTooLongError behaviour cannot originate here.
    return aio.completed_future(None) if self.is_unknown else super().acquire()

Expand Down Expand Up @@ -315,6 +329,12 @@ class RESTBucketManager:
This is designed to provide bucketed rate limiting for Discord HTTP
endpoints that respects the `X-RateLimit-Bucket` rate limit header. To do
this, it makes the assumption that any limit can change at any time.
Parameters
----------
max_rate_limit : builtins.float
The max number of seconds to backoff for when rate limited. Anything
greater than this will instead raise an error.
"""

_POLL_PERIOD: typing.Final[typing.ClassVar[int]] = 20
Expand All @@ -325,6 +345,7 @@ class RESTBucketManager:
"real_hashes_to_buckets",
"closed_event",
"gc_task",
"max_rate_limit",
)

routes_to_hashes: typing.Final[typing.MutableMapping[routes.Route, str]]
Expand All @@ -342,11 +363,18 @@ class RESTBucketManager:
gc_task: typing.Optional[asyncio.Task[None]]
"""The internal garbage collector task."""

def __init__(self) -> None:
max_rate_limit: float
"""The max number of seconds to backoff for when rate limited.
Anything greater than this will instead raise an error.
"""

def __init__(self, max_rate_limit: float) -> None:
    """Initialize the bucket manager with the given maximum rate limit backoff."""
    # Remember the user-configured ceiling first; it never changes afterwards.
    self.max_rate_limit = max_rate_limit
    # Mapping tables start out empty and are populated lazily as real bucket
    # hashes are learned from Discord's rate limit response headers.
    self.routes_to_hashes = {}
    self.real_hashes_to_buckets = {}
    # Event used to signal shutdown, plus a slot for the GC task that is
    # only spawned once the manager is started.
    self.closed_event: asyncio.Event = asyncio.Event()
    self.gc_task: typing.Optional[asyncio.Task[None]] = None

def __enter__(self) -> RESTBucketManager:
    """Return this manager for use as a context manager (no setup performed here)."""
    return self
Expand Down Expand Up @@ -527,7 +555,21 @@ def acquire(self, compiled_route: routes.CompiledRoute) -> asyncio.Future[None]:
bucket = RESTBucket(real_bucket_hash, compiled_route)
self.real_hashes_to_buckets[real_bucket_hash] = bucket

return bucket.acquire()
now = time.monotonic()

if bucket.is_rate_limited(now):
if bucket.reset_at > self.max_rate_limit:
raise errors.RateLimitTooLongError(
route=compiled_route,
retry_after=bucket.reset_at - now,
max_retry_after=self.max_rate_limit,
reset_at=bucket.reset_at,
limit=bucket.limit,
period=bucket.period,
message="The request has been rejected, as you would be waiting for more than the max retry-after",
)

return bucket.acquire(self.max_rate_limit)

def update_rate_limits(
self,
Expand Down
29 changes: 23 additions & 6 deletions hikari/impl/rest.py
Original file line number Diff line number Diff line change
Expand Up @@ -214,6 +214,15 @@ class RESTApp(traits.ExecutorAware):
http_settings : typing.Optional[hikari.config.HTTPSettings]
HTTP settings to use. Sane defaults are used if this is
`builtins.None`.
max_rate_limit : builtins.float
Maximum number of seconds to sleep for when rate limited. If a rate
limit occurs that is longer than this value, then a
`hikari.errors.RateLimitTooLongError` will be raised instead of waiting.
This is provided since some endpoints may respond with non-sensible
rate limits.
Defaults to one minute if unspecified.
proxy_settings : typing.Optional[hikari.config.ProxySettings]
Proxy settings to use. If `builtins.None` then no proxy configuration
will be used.
Expand All @@ -232,6 +241,7 @@ class RESTApp(traits.ExecutorAware):
"_event_loop",
"_executor",
"_http_settings",
"_max_rate_limit",
"_proxy_settings",
"_url",
)
Expand All @@ -243,6 +253,7 @@ def __init__(
connector_owner: bool = True,
executor: typing.Optional[concurrent.futures.Executor] = None,
http_settings: typing.Optional[config.HTTPSettings] = None,
max_rate_limit: float = 60,
proxy_settings: typing.Optional[config.ProxySettings] = None,
url: typing.Optional[str] = None,
) -> None:
Expand All @@ -264,6 +275,7 @@ def __init__(
self._connector_owner = connector_owner
self._event_loop: typing.Optional[asyncio.AbstractEventLoop] = None
self._executor = executor
self._max_rate_limit = max_rate_limit
self._url = url

@property
Expand Down Expand Up @@ -309,6 +321,7 @@ def acquire(
entity_factory=entity_factory,
executor=self._executor,
http_settings=self._http_settings,
max_rate_limit=self._max_rate_limit,
proxy_settings=self._proxy_settings,
token=token,
token_type=token_type,
Expand Down Expand Up @@ -370,6 +383,13 @@ class RESTClientImpl(rest_api.RESTClient):
executor : typing.Optional[concurrent.futures.Executor]
The executor to use for blocking IO. Defaults to the `asyncio` thread
pool if set to `builtins.None`.
max_rate_limit : builtins.float
Maximum number of seconds to sleep for when rate limited. If a rate
limit occurs that is longer than this value, then a
`hikari.errors.RateLimitTooLongError` will be raised instead of waiting.
This is provided since some endpoints may respond with non-sensible
rate limits.
token : hikari.undefined.UndefinedOr[builtins.str]
The bot or bearer token. If no token is to be used,
this can be undefined.
Expand Down Expand Up @@ -414,12 +434,13 @@ def __init__(
entity_factory: entity_factory_.EntityFactory,
executor: typing.Optional[concurrent.futures.Executor],
http_settings: config.HTTPSettings,
max_rate_limit: float,
proxy_settings: config.ProxySettings,
token: typing.Optional[str],
token_type: typing.Optional[str] = None,
rest_url: typing.Optional[str],
) -> None:
self.buckets = buckets.RESTBucketManager()
self.buckets = buckets.RESTBucketManager(max_rate_limit)
# We've been told in DAPI that this is per token.
self.global_rate_limit = rate_limits.ManualRateLimiter()

Expand Down Expand Up @@ -517,11 +538,7 @@ async def _request(
no_auth: bool = False,
) -> typing.Union[None, data_binding.JSONObject, data_binding.JSONArray]:
# Make a ratelimit-protected HTTP request to a JSON endpoint and expect some form
# of JSON response. If an error occurs, the response body is returned in the
# raised exception as a bytes object. This is done since the differences between
# the V6 and V7 API error messages are not documented properly, and there are
# edge cases such as Cloudflare issues where we may receive arbitrary data in
# the response instead of a JSON object.
# of JSON response.

if not self.buckets.is_started:
self.buckets.start()
Expand Down
2 changes: 1 addition & 1 deletion pipelines/nox.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@
from pipelines import config

# Default sessions should be defined here
_options.sessions = ["reformat-code", "pytest", "pdoc3", "pages", "flake8", "mypy", "safety"]
_options.sessions = ["reformat-code", "pytest", "flake8", "mypy", "safety", "pdoc3", "pages"]


def session(*, only_if=lambda: True, reuse_venv: bool = False, **kwargs):
Expand Down
Loading

0 comments on commit ef72110

Please sign in to comment.