Mirror of https://github.com/element-hq/synapse.git, synced 2025-12-28 06:47:37 +00:00
Bump ruff from 0.7.3 to 0.11.10 (#18451)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Andrew Morgan <andrew@amorgan.xyz>
Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
This commit is contained in:
parent a6cb3533db
commit 9d43bec326
changelog.d/18451.misc | 1 (new file)

@@ -0,0 +1 @@
+Bump ruff from 0.7.3 to 0.11.10.
poetry.lock | 40 (generated)

@@ -2440,30 +2440,30 @@ files = [

 [[package]]
 name = "ruff"
-version = "0.7.3"
+version = "0.11.10"
 description = "An extremely fast Python linter and code formatter, written in Rust."
 optional = false
 python-versions = ">=3.7"
 groups = ["dev"]
 files = [
-{file = "ruff-0.7.3-py3-none-linux_armv6l.whl", hash = "sha256:34f2339dc22687ec7e7002792d1f50712bf84a13d5152e75712ac08be565d344"},
-{file = "ruff-0.7.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:fb397332a1879b9764a3455a0bb1087bda876c2db8aca3a3cbb67b3dbce8cda0"},
-{file = "ruff-0.7.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:37d0b619546103274e7f62643d14e1adcbccb242efda4e4bdb9544d7764782e9"},
-{file = "ruff-0.7.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d59f0c3ee4d1a6787614e7135b72e21024875266101142a09a61439cb6e38a5"},
-{file = "ruff-0.7.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:44eb93c2499a169d49fafd07bc62ac89b1bc800b197e50ff4633aed212569299"},
-{file = "ruff-0.7.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d0242ce53f3a576c35ee32d907475a8d569944c0407f91d207c8af5be5dae4e"},
-{file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6b6224af8b5e09772c2ecb8dc9f3f344c1aa48201c7f07e7315367f6dd90ac29"},
-{file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c50f95a82b94421c964fae4c27c0242890a20fe67d203d127e84fbb8013855f5"},
-{file = "ruff-0.7.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f3eff9961b5d2644bcf1616c606e93baa2d6b349e8aa8b035f654df252c8c67"},
-{file = "ruff-0.7.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8963cab06d130c4df2fd52c84e9f10d297826d2e8169ae0c798b6221be1d1d2"},
-{file = "ruff-0.7.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:61b46049d6edc0e4317fb14b33bd693245281a3007288b68a3f5b74a22a0746d"},
-{file = "ruff-0.7.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:10ebce7696afe4644e8c1a23b3cf8c0f2193a310c18387c06e583ae9ef284de2"},
-{file = "ruff-0.7.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3f36d56326b3aef8eeee150b700e519880d1aab92f471eefdef656fd57492aa2"},
-{file = "ruff-0.7.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5d024301109a0007b78d57ab0ba190087b43dce852e552734ebf0b0b85e4fb16"},
-{file = "ruff-0.7.3-py3-none-win32.whl", hash = "sha256:4ba81a5f0c5478aa61674c5a2194de8b02652f17addf8dfc40c8937e6e7d79fc"},
-{file = "ruff-0.7.3-py3-none-win_amd64.whl", hash = "sha256:588a9ff2fecf01025ed065fe28809cd5a53b43505f48b69a1ac7707b1b7e4088"},
-{file = "ruff-0.7.3-py3-none-win_arm64.whl", hash = "sha256:1713e2c5545863cdbfe2cbce21f69ffaf37b813bfd1fb3b90dc9a6f1963f5a8c"},
-{file = "ruff-0.7.3.tar.gz", hash = "sha256:e1d1ba2e40b6e71a61b063354d04be669ab0d39c352461f3d789cac68b54a313"},
+{file = "ruff-0.11.10-py3-none-linux_armv6l.whl", hash = "sha256:859a7bfa7bc8888abbea31ef8a2b411714e6a80f0d173c2a82f9041ed6b50f58"},
+{file = "ruff-0.11.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:968220a57e09ea5e4fd48ed1c646419961a0570727c7e069842edd018ee8afed"},
+{file = "ruff-0.11.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:1067245bad978e7aa7b22f67113ecc6eb241dca0d9b696144256c3a879663bca"},
+{file = "ruff-0.11.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4854fd09c7aed5b1590e996a81aeff0c9ff51378b084eb5a0b9cd9518e6cff2"},
+{file = "ruff-0.11.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8b4564e9f99168c0f9195a0fd5fa5928004b33b377137f978055e40008a082c5"},
+{file = "ruff-0.11.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b6a9cc5b62c03cc1fea0044ed8576379dbaf751d5503d718c973d5418483641"},
+{file = "ruff-0.11.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:607ecbb6f03e44c9e0a93aedacb17b4eb4f3563d00e8b474298a201622677947"},
+{file = "ruff-0.11.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7b3a522fa389402cd2137df9ddefe848f727250535c70dafa840badffb56b7a4"},
+{file = "ruff-0.11.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2f071b0deed7e9245d5820dac235cbdd4ef99d7b12ff04c330a241ad3534319f"},
+{file = "ruff-0.11.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a60e3a0a617eafba1f2e4186d827759d65348fa53708ca547e384db28406a0b"},
+{file = "ruff-0.11.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:da8ec977eaa4b7bf75470fb575bea2cb41a0e07c7ea9d5a0a97d13dbca697bf2"},
+{file = "ruff-0.11.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ddf8967e08227d1bd95cc0851ef80d2ad9c7c0c5aab1eba31db49cf0a7b99523"},
+{file = "ruff-0.11.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5a94acf798a82db188f6f36575d80609072b032105d114b0f98661e1679c9125"},
+{file = "ruff-0.11.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:3afead355f1d16d95630df28d4ba17fb2cb9c8dfac8d21ced14984121f639bad"},
+{file = "ruff-0.11.10-py3-none-win32.whl", hash = "sha256:dc061a98d32a97211af7e7f3fa1d4ca2fcf919fb96c28f39551f35fc55bdbc19"},
+{file = "ruff-0.11.10-py3-none-win_amd64.whl", hash = "sha256:5cc725fbb4d25b0f185cb42df07ab6b76c4489b4bfb740a175f3a59c70e8a224"},
+{file = "ruff-0.11.10-py3-none-win_arm64.whl", hash = "sha256:ef69637b35fb8b210743926778d0e45e1bffa850a7c61e428c6b971549b5f5d1"},
+{file = "ruff-0.11.10.tar.gz", hash = "sha256:d522fb204b4959909ecac47da02830daec102eeb100fb50ea9554818d47a5fa6"},
 ]

 [[package]]
@@ -3394,4 +3394,4 @@ user-search = ["pyicu"]
 [metadata]
 lock-version = "2.1"
 python-versions = "^3.9.0"
-content-hash = "d71159b19349fdc0b7cd8e06e8c8778b603fc37b941c6df34ddc31746783d94d"
+content-hash = "522f5bacf5610646876452e0e397038dd5c959692d2ab76214431bff78562d01"

@@ -320,7 +320,7 @@ all = [
 # failing on new releases. Keeping lower bounds loose here means that dependabot
 # can bump versions without having to update the content-hash in the lockfile.
 # This helps prevents merge conflicts when running a batch of dependabot updates.
-ruff = "0.7.3"
+ruff = "0.11.10"
 # Type checking only works with the pydantic.v1 compat module from pydantic v2
 pydantic = "^2"
@@ -1065,7 +1065,7 @@ class Porter:

 def get_sent_table_size(txn: LoggingTransaction) -> int:
 txn.execute(
-"SELECT count(*) FROM sent_transactions" " WHERE ts >= ?", (yesterday,)
+"SELECT count(*) FROM sent_transactions WHERE ts >= ?", (yesterday,)
 )
 result = txn.fetchone()
 assert result is not None
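This hunk (and many later ones touching SQL strings, log lines, and byte strings) simply merges implicitly concatenated string literals into a single literal, which the newer ruff formatter prefers when the result fits on one line. Adjacent literals are joined at compile time, so the query text is unchanged; a minimal sketch reusing the query from the hunk above:

# Adjacent string literals concatenate at compile time, so both spellings
# produce exactly the same SQL text; only the source formatting differs.
old_style = "SELECT count(*) FROM sent_transactions" " WHERE ts >= ?"
new_style = "SELECT count(*) FROM sent_transactions WHERE ts >= ?"
assert old_style == new_style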
@@ -292,9 +292,9 @@ def main() -> None:
 for key in worker_config:
 if key == "worker_app": # But we allow worker_app
 continue
-assert not key.startswith(
-"worker_"
-), "Main process cannot use worker_* config"
+assert not key.startswith("worker_"), (
+"Main process cannot use worker_* config"
+)
 else:
 worker_pidfile = worker_config["worker_pid_file"]
 worker_cache_factor = worker_config.get("synctl_cache_factor")
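Most of the assert-related hunks below are the same mechanical change: ruff's newer formatter keeps the condition on one line and wraps the assertion message in parentheses, instead of splitting the condition across lines. Both spellings raise the same AssertionError; a small self-contained sketch reusing the condition from the hunk above:

def check_main_process_key(key: str) -> None:
    # Old formatter output: the condition is wrapped, the message trails the closing paren.
    assert not key.startswith(
        "worker_"
    ), "Main process cannot use worker_* config"

    # New formatter output: the condition stays on one line and only the message
    # is parenthesised so it can wrap. Runtime behaviour is identical.
    assert not key.startswith("worker_"), (
        "Main process cannot use worker_* config"
    )


check_main_process_key("database")  # passes; "worker_app" would raise AssertionError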
@@ -287,8 +287,7 @@ class GenericWorkerServer(HomeServer):
 elif listener.type == "metrics":
 if not self.config.metrics.enable_metrics:
 logger.warning(
-"Metrics listener configured, but "
-"enable_metrics is not True!"
+"Metrics listener configured, but enable_metrics is not True!"
 )
 else:
 if isinstance(listener, TCPListenerConfig):

@@ -289,8 +289,7 @@ class SynapseHomeServer(HomeServer):
 elif listener.type == "metrics":
 if not self.config.metrics.enable_metrics:
 logger.warning(
-"Metrics listener configured, but "
-"enable_metrics is not True!"
+"Metrics listener configured, but enable_metrics is not True!"
 )
 else:
 if isinstance(listener, TCPListenerConfig):

@@ -108,8 +108,7 @@ class TlsConfig(Config):
 # Raise an error if this option has been specified without any
 # corresponding certificates.
 raise ConfigError(
-"federation_custom_ca_list specified without "
-"any certificate files"
+"federation_custom_ca_list specified without any certificate files"
 )

 certs = []

@@ -986,8 +986,7 @@ def _check_power_levels(
 if old_level == user_level:
 raise AuthError(
 403,
-"You don't have permission to remove ops level equal "
-"to your own",
+"You don't have permission to remove ops level equal to your own",
 )

 # Check if the old and new levels are greater than the user level

@@ -1163,7 +1163,7 @@ class E2eKeysHandler:
 devices = devices[user_id]
 except SynapseError as e:
 failure = _exception_to_failure(e)
-failures[user_id] = {device: failure for device in signatures.keys()}
+failures[user_id] = dict.fromkeys(signatures.keys(), failure)
 return signature_list, failures

 for device_id, device in signatures.items():
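Several hunks in this commit replace a dict comprehension that maps every key to the same value with dict.fromkeys, the form newer ruff releases suggest for this pattern. The two forms build equal dictionaries, and in both of them every key ends up referencing the same value object. A short sketch with stand-in device IDs and a stand-in failure dict:

signatures = {"DEVICE_A": {"sig": "..."}, "DEVICE_B": {"sig": "..."}}
failure = {"status": 400, "errcode": "M_INVALID_SIGNATURE"}

# Old spelling: comprehension mapping every device to the same failure.
by_comprehension = {device: failure for device in signatures.keys()}
# New spelling: dict.fromkeys with an explicit fill value.
by_fromkeys = dict.fromkeys(signatures.keys(), failure)

assert by_comprehension == by_fromkeys
# Both forms store references to the single `failure` object, not copies.
assert by_fromkeys["DEVICE_A"] is by_fromkeys["DEVICE_B"] is failure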
@@ -1303,7 +1303,7 @@ class E2eKeysHandler:
 except SynapseError as e:
 failure = _exception_to_failure(e)
 for user, devicemap in signatures.items():
-failures[user] = {device_id: failure for device_id in devicemap.keys()}
+failures[user] = dict.fromkeys(devicemap.keys(), failure)
 return signature_list, failures

 for target_user, devicemap in signatures.items():

@@ -1344,9 +1344,7 @@ class E2eKeysHandler:
 # other devices were signed -- mark those as failures
 logger.debug("upload signature: too many devices specified")
 failure = _exception_to_failure(NotFoundError("Unknown device"))
-failures[target_user] = {
-device: failure for device in other_devices
-}
+failures[target_user] = dict.fromkeys(other_devices, failure)

 if user_signing_key_id in master_key.get("signatures", {}).get(
 user_id, {}

@@ -1367,9 +1365,7 @@ class E2eKeysHandler:
 except SynapseError as e:
 failure = _exception_to_failure(e)
 if device_id is None:
-failures[target_user] = {
-device_id: failure for device_id in devicemap.keys()
-}
+failures[target_user] = dict.fromkeys(devicemap.keys(), failure)
 else:
 failures.setdefault(target_user, {})[device_id] = failure

@@ -1312,9 +1312,9 @@ class FederationHandler:
 if state_key is not None:
 # the event was not rejected (get_event raises a NotFoundError for rejected
 # events) so the state at the event should include the event itself.
-assert (
-state_map.get((event.type, state_key)) == event.event_id
-), "State at event did not include event itself"
+assert state_map.get((event.type, state_key)) == event.event_id, (
+"State at event did not include event itself"
+)

 # ... but we need the state *before* that event
 if "replaces_state" in event.unsigned:

@@ -143,9 +143,9 @@ class MessageHandler:
 elif membership == Membership.LEAVE:
 key = (event_type, state_key)
 # If the membership is not JOIN, then the event ID should exist.
-assert (
-membership_event_id is not None
-), "check_user_in_room_or_world_readable returned invalid data"
+assert membership_event_id is not None, (
+"check_user_in_room_or_world_readable returned invalid data"
+)
 room_state = await self._state_storage_controller.get_state_for_events(
 [membership_event_id], StateFilter.from_types([key])
 )

@@ -242,9 +242,9 @@ class MessageHandler:
 room_state = await self.store.get_events(state_ids.values())
 elif membership == Membership.LEAVE:
 # If the membership is not JOIN, then the event ID should exist.
-assert (
-membership_event_id is not None
-), "check_user_in_room_or_world_readable returned invalid data"
+assert membership_event_id is not None, (
+"check_user_in_room_or_world_readable returned invalid data"
+)
 room_state_events = (
 await self._state_storage_controller.get_state_for_events(
 [membership_event_id], state_filter=state_filter

@@ -1266,12 +1266,14 @@ class EventCreationHandler:
 # Allow an event to have empty list of prev_event_ids
 # only if it has auth_event_ids.
 or auth_event_ids
-), "Attempting to create a non-m.room.create event with no prev_events or auth_event_ids"
+), (
+"Attempting to create a non-m.room.create event with no prev_events or auth_event_ids"
+)
 else:
 # we now ought to have some prev_events (unless it's a create event).
-assert (
-builder.type == EventTypes.Create or prev_event_ids
-), "Attempting to create a non-m.room.create event with no prev_events"
+assert builder.type == EventTypes.Create or prev_event_ids, (
+"Attempting to create a non-m.room.create event with no prev_events"
+)

 if for_batch:
 assert prev_event_ids is not None
@@ -1192,9 +1192,9 @@ class SsoHandler:
 """

 # It is expected that this is the main process.
-assert isinstance(
-self._device_handler, DeviceHandler
-), "revoking SSO sessions can only be called on the main process"
+assert isinstance(self._device_handler, DeviceHandler), (
+"revoking SSO sessions can only be called on the main process"
+)

 # Invalidate any running user-mapping sessions
 to_delete = []

@@ -425,9 +425,9 @@ class MatrixFederationHttpClient:
 )
 else:
 proxy_authorization_secret = hs.config.worker.worker_replication_secret
-assert (
-proxy_authorization_secret is not None
-), "`worker_replication_secret` must be set when using `outbound_federation_restricted_to` (used to authenticate requests across workers)"
+assert proxy_authorization_secret is not None, (
+"`worker_replication_secret` must be set when using `outbound_federation_restricted_to` (used to authenticate requests across workers)"
+)
 federation_proxy_credentials = BearerProxyCredentials(
 proxy_authorization_secret.encode("ascii")
 )

@@ -173,9 +173,9 @@ class ProxyAgent(_AgentBase):
 self._federation_proxy_endpoint: Optional[IStreamClientEndpoint] = None
 self._federation_proxy_credentials: Optional[ProxyCredentials] = None
 if federation_proxy_locations:
-assert (
-federation_proxy_credentials is not None
-), "`federation_proxy_credentials` are required when using `federation_proxy_locations`"
+assert federation_proxy_credentials is not None, (
+"`federation_proxy_credentials` are required when using `federation_proxy_locations`"
+)

 endpoints: List[IStreamClientEndpoint] = []
 for federation_proxy_location in federation_proxy_locations:

@@ -302,9 +302,9 @@ class ProxyAgent(_AgentBase):
 parsed_uri.scheme == b"matrix-federation"
 and self._federation_proxy_endpoint
 ):
-assert (
-self._federation_proxy_credentials is not None
-), "`federation_proxy_credentials` are required when using `federation_proxy_locations`"
+assert self._federation_proxy_credentials is not None, (
+"`federation_proxy_credentials` are required when using `federation_proxy_locations`"
+)

 # Set a Proxy-Authorization header
 if headers is None:

@@ -582,9 +582,9 @@ def parse_enum(
 is not one of those allowed values.
 """
 # Assert the enum values are strings.
-assert all(
-isinstance(e.value, str) for e in E
-), "parse_enum only works with string values"
+assert all(isinstance(e.value, str) for e in E), (
+"parse_enum only works with string values"
+)
 str_value = parse_string(
 request,
 name,

@@ -894,9 +894,9 @@ class ModuleApi:
 Raises:
 synapse.api.errors.AuthError: the access token is invalid
 """
-assert isinstance(
-self._device_handler, DeviceHandler
-), "invalidate_access_token can only be called on the main process"
+assert isinstance(self._device_handler, DeviceHandler), (
+"invalidate_access_token can only be called on the main process"
+)

 # see if the access token corresponds to a device
 user_info = yield defer.ensureDeferred(

@@ -128,9 +128,9 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):

 # We reserve `instance_name` as a parameter to sending requests, so we
 # assert here that sub classes don't try and use the name.
-assert (
-"instance_name" not in self.PATH_ARGS
-), "`instance_name` is a reserved parameter name"
+assert "instance_name" not in self.PATH_ARGS, (
+"`instance_name` is a reserved parameter name"
+)
 assert (
 "instance_name"
 not in signature(self.__class__._serialize_payload).parameters
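The second assertion retained in this hunk checks reserved argument names via signature(...).parameters from the standard-library inspect module. A small, self-contained sketch of that introspection pattern; the endpoint class and its payload method here are stand-ins for illustration, not Synapse code:

from inspect import signature


class FakeReplicationEndpoint:  # stand-in class, not the real base class
    @staticmethod
    async def _serialize_payload(user_id: str, content: dict) -> dict:
        return {"user_id": user_id, "content": content}


# signature(...).parameters is an ordered mapping of parameter names, so a
# reserved name can be rejected with a simple membership test.
params = signature(FakeReplicationEndpoint._serialize_payload).parameters
assert "instance_name" not in params, "`instance_name` is a reserved parameter name"
print(list(params))  # ['user_id', 'content']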
@@ -200,9 +200,9 @@ class EventsStream(_StreamFromIdGen):

 # we rely on get_all_new_forward_event_rows strictly honouring the limit, so
 # that we know it is safe to just take upper_limit = event_rows[-1][0].
-assert (
-len(event_rows) <= target_row_count
-), "get_all_new_forward_event_rows did not honour row limit"
+assert len(event_rows) <= target_row_count, (
+"get_all_new_forward_event_rows did not honour row limit"
+)

 # if we hit the limit on event_updates, there's no point in going beyond the
 # last stream_id in the batch for the other sources.

@@ -207,8 +207,7 @@ class PurgeHistoryRestServlet(RestServlet):
 (stream, topo, _event_id) = r
 token = "t%d-%d" % (topo, stream)
 logger.info(
-"[purge] purging up to token %s (received_ts %i => "
-"stream_ordering %i)",
+"[purge] purging up to token %s (received_ts %i => stream_ordering %i)",
 token,
 ts,
 stream_ordering,
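Joining the two halves of the logging format string does not change how the message is built: the logging module still interpolates the %s/%i placeholders lazily when the record is emitted. A short sketch with made-up values:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("purge-example")

token, ts, stream_ordering = "t3-7", 1_700_000_000_000, 42

# Single-literal format string; the arguments are interpolated by logging,
# exactly as they were when the literal was split into two adjacent pieces.
logger.info(
    "[purge] purging up to token %s (received_ts %i => stream_ordering %i)",
    token,
    ts,
    stream_ordering,
)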
@@ -39,9 +39,7 @@ logger = logging.getLogger(__name__)

 class ReceiptRestServlet(RestServlet):
 PATTERNS = client_patterns(
-"/rooms/(?P<room_id>[^/]*)"
-"/receipt/(?P<receipt_type>[^/]*)"
-"/(?P<event_id>[^/]*)$"
+"/rooms/(?P<room_id>[^/]*)/receipt/(?P<receipt_type>[^/]*)/(?P<event_id>[^/]*)$"
 )
 CATEGORY = "Receipts requests"

@@ -44,9 +44,9 @@ class MSC4108DelegationRendezvousServlet(RestServlet):
 redirection_target: Optional[str] = (
 hs.config.experimental.msc4108_delegation_endpoint
 )
-assert (
-redirection_target is not None
-), "Servlet is only registered if there is a delegation target"
+assert redirection_target is not None, (
+"Servlet is only registered if there is a delegation target"
+)
 self.endpoint = redirection_target.encode("utf-8")

 async def on_POST(self, request: SynapseRequest) -> None:

@@ -94,9 +94,9 @@ class HttpTransactionCache:
 # (appservice and guest users), but does not cover access tokens minted
 # by the admin API. Use the access token ID instead.
 else:
-assert (
-requester.access_token_id is not None
-), "Requester must have an access_token_id"
+assert requester.access_token_id is not None, (
+"Requester must have an access_token_id"
+)
 return (path, "user_admin", requester.access_token_id)

 def fetch_or_execute_request(
@@ -739,9 +739,9 @@ class BackgroundUpdater:
 c.execute(sql)

 async def updater(progress: JsonDict, batch_size: int) -> int:
-assert isinstance(
-self.db_pool.engine, engines.PostgresEngine
-), "validate constraint background update registered for non-Postres database"
+assert isinstance(self.db_pool.engine, engines.PostgresEngine), (
+"validate constraint background update registered for non-Postres database"
+)

 logger.info("Validating constraint %s to %s", constraint_name, table)
 await self.db_pool.runWithConnection(runner)

@@ -900,9 +900,9 @@ class BackgroundUpdater:
 on the table. Used to iterate over the table.
 """

-assert isinstance(
-self.db_pool.engine, engines.PostgresEngine
-), "validate constraint background update registered for non-Postres database"
+assert isinstance(self.db_pool.engine, engines.PostgresEngine), (
+"validate constraint background update registered for non-Postres database"
+)

 async def updater(progress: JsonDict, batch_size: int) -> int:
 return await self.validate_constraint_and_delete_in_background(

@@ -870,8 +870,7 @@ class EventsPersistenceStorageController:
 # This should only happen for outlier events.
 if not ev.internal_metadata.is_outlier():
 raise Exception(
-"Context for new event %s has no state "
-"group" % (ev.event_id,)
+"Context for new event %s has no state group" % (ev.event_id,)
 )
 continue
 if ctx.state_group_deltas:

@@ -650,9 +650,9 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke

 @wrap_as_background_process("update_client_ips")
 async def _update_client_ips_batch(self) -> None:
-assert (
-self._update_on_this_worker
-), "This worker is not designated to update client IPs"
+assert self._update_on_this_worker, (
+"This worker is not designated to update client IPs"
+)

 # If the DB pool has already terminated, don't try updating
 if not self.db_pool.is_running():

@@ -671,9 +671,9 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke
 txn: LoggingTransaction,
 to_update: Mapping[Tuple[str, str, str], Tuple[str, Optional[str], int]],
 ) -> None:
-assert (
-self._update_on_this_worker
-), "This worker is not designated to update client IPs"
+assert self._update_on_this_worker, (
+"This worker is not designated to update client IPs"
+)

 # Keys and values for the `user_ips` upsert.
 user_ips_keys = []

@@ -200,9 +200,9 @@ class DeviceInboxWorkerStore(SQLBaseStore):
 to_stream_id=to_stream_id,
 )

-assert (
-last_processed_stream_id == to_stream_id
-), "Expected _get_device_messages to process all to-device messages up to `to_stream_id`"
+assert last_processed_stream_id == to_stream_id, (
+"Expected _get_device_messages to process all to-device messages up to `to_stream_id`"
+)

 return user_id_device_id_to_messages

@@ -1092,7 +1092,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
 ),
 )

-results: Dict[str, Optional[str]] = {user_id: None for user_id in user_ids}
+results: Dict[str, Optional[str]] = dict.fromkeys(user_ids)
 results.update(rows)

 return results
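Here the comprehension mapped every user ID to None, so the replacement can drop the second argument entirely: dict.fromkeys defaults the fill value to None. A quick sketch with placeholder user IDs:

from typing import Dict, Optional

user_ids = ["@alice:example.org", "@bob:example.org"]  # placeholder IDs

# With no second argument, dict.fromkeys fills every key with None, matching
# the {user_id: None for user_id in user_ids} comprehension it replaces.
results: Dict[str, Optional[str]] = dict.fromkeys(user_ids)
assert results == {"@alice:example.org": None, "@bob:example.org": None}

# Rows later fetched from the database then overwrite the None defaults:
results.update({"@alice:example.org": "ed25519:device1"})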
@@ -246,9 +246,9 @@ class PersistEventsStore:
 self.is_mine_id = hs.is_mine_id

 # This should only exist on instances that are configured to write
-assert (
-hs.get_instance_name() in hs.config.worker.writers.events
-), "Can only instantiate EventsStore on master"
+assert hs.get_instance_name() in hs.config.worker.writers.events, (
+"Can only instantiate EventsStore on master"
+)

 # Since we have been configured to write, we ought to have id generators,
 # rather than id trackers.

@@ -465,9 +465,9 @@ class PersistEventsStore:
 missing_membership_event_ids
 )
 # There shouldn't be any missing events
-assert (
-remaining_events.keys() == missing_membership_event_ids
-), missing_membership_event_ids.difference(remaining_events.keys())
+assert remaining_events.keys() == missing_membership_event_ids, (
+missing_membership_event_ids.difference(remaining_events.keys())
+)
 membership_event_map.update(remaining_events)

 for (

@@ -534,9 +534,9 @@ class PersistEventsStore:
 missing_state_event_ids
 )
 # There shouldn't be any missing events
-assert (
-remaining_events.keys() == missing_state_event_ids
-), missing_state_event_ids.difference(remaining_events.keys())
+assert remaining_events.keys() == missing_state_event_ids, (
+missing_state_event_ids.difference(remaining_events.keys())
+)
 for event in remaining_events.values():
 current_state_map[(event.type, event.state_key)] = event

@@ -644,9 +644,9 @@ class PersistEventsStore:
 if missing_event_ids:
 remaining_events = await self.store.get_events(missing_event_ids)
 # There shouldn't be any missing events
-assert (
-remaining_events.keys() == missing_event_ids
-), missing_event_ids.difference(remaining_events.keys())
+assert remaining_events.keys() == missing_event_ids, (
+missing_event_ids.difference(remaining_events.keys())
+)
 for event in remaining_events.values():
 current_state_map[(event.type, event.state_key)] = event

@@ -3448,8 +3448,7 @@ class PersistEventsStore:
 # Delete all these events that we've already fetched and now know that their
 # prev events are the new backwards extremeties.
 query = (
-"DELETE FROM event_backward_extremities"
-" WHERE event_id = ? AND room_id = ?"
+"DELETE FROM event_backward_extremities WHERE event_id = ? AND room_id = ?"
 )
 backward_extremity_tuples_to_remove = [
 (ev.event_id, ev.room_id)

@@ -824,9 +824,9 @@ class EventsWorkerStore(SQLBaseStore):

 if missing_events_ids:

-async def get_missing_events_from_cache_or_db() -> (
-Dict[str, EventCacheEntry]
-):
+async def get_missing_events_from_cache_or_db() -> Dict[
+str, EventCacheEntry
+]:
 """Fetches the events in `missing_event_ids` from the database.

 Also creates entries in `self._current_event_fetches` to allow
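This hunk only changes how a long return annotation wraps: the parenthesised annotation becomes a subscript split across lines. Both spellings denote the same type; a compact sketch using a stand-in value class:

from typing import Dict


class EventCacheEntry:  # stand-in for the real cache entry type
    pass


# Old wrapping: the whole annotation inside parentheses.
async def fetch_old() -> (
    Dict[str, EventCacheEntry]
):
    return {}


# New wrapping: the subscript itself is split across lines.
async def fetch_new() -> Dict[
    str, EventCacheEntry
]:
    return {}


# The evaluated annotations are equal either way.
assert fetch_old.__annotations__["return"] == fetch_new.__annotations__["return"]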
@@ -304,9 +304,9 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
 txn:
 threepids: List of threepid dicts to reserve
 """
-assert (
-self._update_on_this_worker
-), "This worker is not designated to update MAUs"
+assert self._update_on_this_worker, (
+"This worker is not designated to update MAUs"
+)

 # XXX what is this function trying to achieve? It upserts into
 # monthly_active_users for each *registered* reserved mau user, but why?

@@ -340,9 +340,9 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
 Args:
 user_id: user to add/update
 """
-assert (
-self._update_on_this_worker
-), "This worker is not designated to update MAUs"
+assert self._update_on_this_worker, (
+"This worker is not designated to update MAUs"
+)

 # Support user never to be included in MAU stats. Note I can't easily call this
 # from upsert_monthly_active_user_txn because then I need a _txn form of

@@ -379,9 +379,9 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
 txn:
 user_id: user to add/update
 """
-assert (
-self._update_on_this_worker
-), "This worker is not designated to update MAUs"
+assert self._update_on_this_worker, (
+"This worker is not designated to update MAUs"
+)

 # Am consciously deciding to lock the table on the basis that is ought
 # never be a big table and alternative approaches (batching multiple

@@ -409,9 +409,9 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
 Args:
 user_id: the user_id to query
 """
-assert (
-self._update_on_this_worker
-), "This worker is not designated to update MAUs"
+assert self._update_on_this_worker, (
+"This worker is not designated to update MAUs"
+)

 if self._limit_usage_by_mau or self._mau_stats_only:
 # Trial users and guests should not be included as part of MAU group
@@ -199,8 +199,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):

 # Update backward extremeties
 txn.execute_batch(
-"INSERT INTO event_backward_extremities (room_id, event_id)"
-" VALUES (?, ?)",
+"INSERT INTO event_backward_extremities (room_id, event_id) VALUES (?, ?)",
 [(room_id, event_id) for (event_id,) in new_backwards_extrems],
 )

@@ -98,9 +98,9 @@ class StateDeltasStore(SQLBaseStore):
 prev_stream_id = int(prev_stream_id)

 # check we're not going backwards
-assert (
-prev_stream_id <= max_stream_id
-), f"New stream id {max_stream_id} is smaller than prev stream id {prev_stream_id}"
+assert prev_stream_id <= max_stream_id, (
+f"New stream id {max_stream_id} is smaller than prev stream id {prev_stream_id}"
+)

 if not self._curr_state_delta_stream_cache.has_any_entity_changed(
 prev_stream_id

@@ -274,10 +274,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
 assert isinstance(self._account_data_id_gen, AbstractStreamIdGenerator)

 def remove_tag_txn(txn: LoggingTransaction, next_id: int) -> None:
-sql = (
-"DELETE FROM room_tags "
-" WHERE user_id = ? AND room_id = ? AND tag = ?"
-)
+sql = "DELETE FROM room_tags WHERE user_id = ? AND room_id = ? AND tag = ?"
 txn.execute(sql, (user_id, room_id, tag))
 self._update_revision_txn(txn, user_id, room_id, next_id)

@@ -582,9 +582,9 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
 retry_counter: number of failures in refreshing the profile so far. Used for
 exponential backoff calculations.
 """
-assert not self.hs.is_mine_id(
-user_id
-), "Can't mark a local user as a stale remote user."
+assert not self.hs.is_mine_id(user_id), (
+"Can't mark a local user as a stale remote user."
+)

 server_name = UserID.from_string(user_id).domain

@@ -396,8 +396,7 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
 return True, count

 txn.execute(
-"SELECT state_group FROM state_group_edges"
-" WHERE state_group = ?",
+"SELECT state_group FROM state_group_edges WHERE state_group = ?",
 (state_group,),
 )

@@ -75,8 +75,7 @@ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) ->
 progress_json = json.dumps(progress)

 sql = (
-"INSERT into background_updates (update_name, progress_json)"
-" VALUES (?, ?)"
+"INSERT into background_updates (update_name, progress_json) VALUES (?, ?)"
 )

 cur.execute(sql, ("event_search", progress_json))

@@ -55,8 +55,7 @@ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) ->
 progress_json = json.dumps(progress)

 sql = (
-"INSERT into background_updates (update_name, progress_json)"
-" VALUES (?, ?)"
+"INSERT into background_updates (update_name, progress_json) VALUES (?, ?)"
 )

 cur.execute(sql, ("event_origin_server_ts", progress_json))

@@ -59,8 +59,7 @@ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) ->
 progress_json = json.dumps(progress)

 sql = (
-"INSERT into background_updates (update_name, progress_json)"
-" VALUES (?, ?)"
+"INSERT into background_updates (update_name, progress_json) VALUES (?, ?)"
 )

 cur.execute(sql, ("event_search_order", progress_json))

@@ -55,8 +55,7 @@ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) ->
 progress_json = json.dumps(progress)

 sql = (
-"INSERT into background_updates (update_name, progress_json)"
-" VALUES (?, ?)"
+"INSERT into background_updates (update_name, progress_json) VALUES (?, ?)"
 )

 cur.execute(sql, ("event_fields_sender_url", progress_json))

@@ -889,8 +889,7 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken):
 def __str__(self) -> str:
 instances = ", ".join(f"{k}: {v}" for k, v in sorted(self.instance_map.items()))
 return (
-f"MultiWriterStreamToken(stream: {self.stream}, "
-f"instances: {{{instances}}})"
+f"MultiWriterStreamToken(stream: {self.stream}, instances: {{{instances}}})"
 )
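The collapsed f-string keeps its doubled braces: inside an f-string, {{ and }} are literal braces, so {{{instances}}} renders as an opening brace, the interpolated value, and a closing brace. A tiny sketch with stand-in instance data:

instance_map = {"worker1": 10, "worker2": 12}  # stand-in data
stream = 7

instances = ", ".join(f"{k}: {v}" for k, v in sorted(instance_map.items()))
# {{ and }} are literal braces; the {instances} field in the middle is interpolated.
rendered = f"MultiWriterStreamToken(stream: {stream}, instances: {{{instances}}})"
assert rendered == "MultiWriterStreamToken(stream: 7, instances: {worker1: 10, worker2: 12})"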
@@ -462,7 +462,7 @@ class StateFilter:
 new_types.update({state_type: set() for state_type in minus_wildcards})

 # insert the plus wildcards
-new_types.update({state_type: None for state_type in plus_wildcards})
+new_types.update(dict.fromkeys(plus_wildcards))

 # insert the specific state keys
 for state_type, state_key in plus_state_keys:

@@ -114,7 +114,7 @@ def sorted_topologically(

 # This is implemented by Kahn's algorithm.

-degree_map = {node: 0 for node in nodes}
+degree_map = dict.fromkeys(nodes, 0)
 reverse_graph: Dict[T, Set[T]] = {}

 for node, edges in graph.items():

@@ -164,7 +164,7 @@ def sorted_topologically_batched(
 persisted.
 """

-degree_map = {node: 0 for node in nodes}
+degree_map = dict.fromkeys(nodes, 0)
 reverse_graph: Dict[T, Set[T]] = {}

 for node, edges in graph.items():

@@ -65,20 +65,20 @@ def required_state_json_to_state_map(required_state: Any) -> StateMap[EventBase]
 if isinstance(required_state, list):
 for state_event_dict in required_state:
 # Yell because we're in a test and this is unexpected
-assert isinstance(
-state_event_dict, dict
-), "`required_state` should be a list of event dicts"
+assert isinstance(state_event_dict, dict), (
+"`required_state` should be a list of event dicts"
+)

 event_type = state_event_dict["type"]
 event_state_key = state_event_dict["state_key"]

 # Yell because we're in a test and this is unexpected
-assert isinstance(
-event_type, str
-), "Each event in `required_state` should have a string `type`"
-assert isinstance(
-event_state_key, str
-), "Each event in `required_state` should have a string `state_key`"
+assert isinstance(event_type, str), (
+"Each event in `required_state` should have a string `type`"
+)
+assert isinstance(event_state_key, str), (
+"Each event in `required_state` should have a string `state_key`"
+)

 state_map[(event_type, event_state_key)] = make_event_from_dict(
 state_event_dict

@@ -1178,10 +1178,10 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
 for use_numeric in [False, True]:
 if use_numeric:
 prefix1 = f"{i}"
-prefix2 = f"{i+1}"
+prefix2 = f"{i + 1}"
 else:
 prefix1 = f"a{i}"
-prefix2 = f"a{i+1}"
+prefix2 = f"a{i + 1}"

 local_user_1 = self.register_user(f"user{char}{prefix1}", "password")
 local_user_2 = self.register_user(f"user{char}{prefix2}", "password")
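Newer ruff releases also format the expressions inside f-string replacement fields, which is where the added spaces around + come from. The rendered text is unchanged; a one-line check:

i = 3
# Only the source formatting differs between the two spellings.
assert f"a{i+1}" == f"a{i + 1}" == "a4"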
@@ -436,8 +436,7 @@ class FederationClientTests(HomeserverTestCase):

 # Send it the HTTP response
 client.dataReceived(
-b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n"
-b"Server: Fake\r\n\r\n"
+b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nServer: Fake\r\n\r\n"
 )

 # Push by enough to time it out

@@ -691,10 +690,7 @@ class FederationClientTests(HomeserverTestCase):

 # Send it a huge HTTP response
 protocol.dataReceived(
-b"HTTP/1.1 200 OK\r\n"
-b"Server: Fake\r\n"
-b"Content-Type: application/json\r\n"
-b"\r\n"
+b"HTTP/1.1 200 OK\r\nServer: Fake\r\nContent-Type: application/json\r\n\r\n"
 )

 self.pump()

@@ -250,9 +250,7 @@ small_cmyk_jpeg = TestImage(
 )

 small_lossless_webp = TestImage(
-unhexlify(
-b"524946461a000000574542505650384c0d0000002f0000001007" b"1011118888fe0700"
-),
+unhexlify(b"524946461a000000574542505650384c0d0000002f00000010071011118888fe0700"),
 b"image/webp",
 b".webp",
 )

@@ -324,7 +324,7 @@ class EventsStreamTestCase(BaseStreamTestCase):
 pls = self.helper.get_state(
 self.room_id, EventTypes.PowerLevels, tok=self.user_tok
 )
-pls["users"].update({u: 50 for u in user_ids})
+pls["users"].update(dict.fromkeys(user_ids, 50))
 self.helper.send_state(
 self.room_id,
 EventTypes.PowerLevels,

@@ -1312,7 +1312,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
 # Check that response json body contains a "rooms" key
 self.assertTrue(
 "rooms" in channel.json_body,
-msg="Response body does not " "contain a 'rooms' key",
+msg="Response body does not contain a 'rooms' key",
 )

 # Check that 3 rooms were returned

@@ -3901,9 +3901,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
 image_data1 = SMALL_PNG
 # Resolution: 1×1, MIME type: image/gif, Extension: gif, Size: 35 B
 image_data2 = unhexlify(
-b"47494638376101000100800100000000"
-b"ffffff2c00000000010001000002024c"
-b"01003b"
+b"47494638376101000100800100000000ffffff2c00000000010001000002024c01003b"
 )
 # Resolution: 1×1, MIME type: image/bmp, Extension: bmp, Size: 54 B
 image_data3 = unhexlify(

@@ -309,8 +309,8 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase):
 self.assertEqual(
 response_body["rooms"][room_id1]["limited"],
 False,
-f'Our `timeline_limit` was {sync_body["lists"]["foo-list"]["timeline_limit"]} '
-+ f'and {len(response_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. '
+f"Our `timeline_limit` was {sync_body['lists']['foo-list']['timeline_limit']} "
++ f"and {len(response_body['rooms'][room_id1]['timeline'])} events were returned in the timeline. "
 + str(response_body["rooms"][room_id1]),
 )
 # Check to make sure the latest events are returned

@@ -387,7 +387,7 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase):
 response_body["rooms"][room_id1]["limited"],
 True,
 f"Our `timeline_limit` was {timeline_limit} "
-+ f'and {len(response_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. '
++ f"and {len(response_body['rooms'][room_id1]['timeline'])} events were returned in the timeline. "
 + str(response_body["rooms"][room_id1]),
 )
 # Check to make sure that the "live" and historical events are returned
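These two hunks flip the f-strings to double outer quotes with single quotes around the dictionary keys inside the replacement fields. Because the inner and outer quotes still differ, this is valid on the Python versions Synapse supports and the rendered text is identical; a small sketch with a stand-in sync body:

sync_body = {"lists": {"foo-list": {"timeline_limit": 10}}}  # stand-in data

old_style = f'Our `timeline_limit` was {sync_body["lists"]["foo-list"]["timeline_limit"]} '
new_style = f"Our `timeline_limit` was {sync_body['lists']['foo-list']['timeline_limit']} "

# The quote swap is purely cosmetic; the rendered strings are equal.
assert old_style == new_style == "Our `timeline_limit` was 10 "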
@@ -1006,7 +1006,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
 data = base64.b64encode(SMALL_PNG)

 end_content = (
-b"<html><head>" b'<img src="data:image/png;base64,%s" />' b"</head></html>"
+b'<html><head><img src="data:image/png;base64,%s" /></head></html>'
 ) % (data,)

 channel = self.make_request(

@@ -716,9 +716,9 @@ class RestHelper:
 "/login",
 content={"type": "m.login.token", "token": login_token},
 )
-assert (
-channel.code == expected_status
-), f"unexpected status in response: {channel.code}"
+assert channel.code == expected_status, (
+f"unexpected status in response: {channel.code}"
+)
 return channel.json_body

 def auth_via_oidc(

@@ -878,7 +878,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
 data = base64.b64encode(SMALL_PNG)

 end_content = (
-b"<html><head>" b'<img src="data:image/png;base64,%s" />' b"</head></html>"
+b'<html><head><img src="data:image/png;base64,%s" /></head></html>'
 ) % (data,)

 channel = self.make_request(

@@ -225,9 +225,9 @@ class FakeChannel:
 new_headers.addRawHeader(k, v)
 headers = new_headers

-assert isinstance(
-headers, Headers
-), f"headers are of the wrong type: {headers!r}"
+assert isinstance(headers, Headers), (
+f"headers are of the wrong type: {headers!r}"
+)

 self.result["headers"] = headers

@@ -349,7 +349,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
 )

 self.mock_txn.execute.assert_called_once_with(
-"UPDATE tablename SET colC = ?, colD = ? WHERE" " colA = ? AND colB = ?",
+"UPDATE tablename SET colC = ?, colD = ? WHERE colA = ? AND colB = ?",
 [3, 4, 1, 2],
 )

@@ -211,9 +211,9 @@ class DeviceStoreTestCase(HomeserverTestCase):
 even if that means leaving an earlier batch one EDU short of the limit.
 """

-assert self.hs.is_mine_id(
-"@user_id:test"
-), "Test not valid: this MXID should be considered local"
+assert self.hs.is_mine_id("@user_id:test"), (
+"Test not valid: this MXID should be considered local"
+)

 self.get_success(
 self.store.set_e2e_cross_signing_key(

@@ -114,7 +114,7 @@ def get_all_topologically_sorted_orders(
 # This is implemented by Kahn's algorithm, and forking execution each time
 # we have a choice over which node to consider next.

-degree_map = {node: 0 for node in nodes}
+degree_map = dict.fromkeys(nodes, 0)
 reverse_graph: Dict[T, Set[T]] = {}

 for node, edges in graph.items():

@@ -149,7 +149,7 @@ class _DummyStore:
 async def get_partial_state_events(
 self, event_ids: Collection[str]
 ) -> Dict[str, bool]:
-return {e: False for e in event_ids}
+return dict.fromkeys(event_ids, False)

 async def get_state_group_delta(
 self, name: str

@@ -48,7 +48,7 @@ def setup_logging() -> None:

 # We exclude `%(asctime)s` from this format because the Twisted logger adds its own
 # timestamp
-log_format = "%(name)s - %(lineno)d - " "%(levelname)s - %(request)s - %(message)s"
+log_format = "%(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s"

 handler = ToTwistedHandler()
 formatter = logging.Formatter(log_format)