################################################################
# The core state machine
################################################################
#
# Rule 1: everything that affects the state machine and state transitions must
# live here in this file. As much as possible goes into the table-based
# representation, but for the bits that don't quite fit, the actual code and
# state must nonetheless live here.
#
# Rule 2: this file does not know about what role we're playing; it only knows
# about HTTP request/response cycles in the abstract. This ensures that we
# don't cheat and apply different rules to local and remote parties.
#
#
# Theory of operation
# ===================
#
# Possibly the simplest way to think about this is that we actually have 5
# different state machines here. Yes, 5. These are:
#
# 1) The client state, with its complicated automaton (see the docs)
# 2) The server state, with its complicated automaton (see the docs)
# 3) The keep-alive state, with possible states {True, False}
# 4) The SWITCH_CONNECT state, with possible states {False, True}
# 5) The SWITCH_UPGRADE state, with possible states {False, True}
#
# For (3)-(5), the first state listed is the initial state.
#
# (1)-(3) are stored explicitly in member variables. The last two are stored
# implicitly in the pending_switch_proposals set as:
#   (state of 4) == (_SWITCH_CONNECT in pending_switch_proposals)
#   (state of 5) == (_SWITCH_UPGRADE in pending_switch_proposals)
#
# And each of these machines has two different kinds of transitions:
#
#   a) Event-triggered
#   b) State-triggered
#
# Event-triggered is the obvious thing that you'd think it is: some event
# happens, and if it's the right event at the right time then a transition
# happens. But there are somewhat complicated rules for which machines can
# "see" which events. (As a rule of thumb, if a machine "sees" an event, this
# means two things: the event can affect the machine, and if the machine is
# not in a state where it expects that event then it's an error.) These rules
# are:
#
# 1) The client machine sees all h11.events objects emitted by the client.
#
# 2) The server machine sees all h11.events objects emitted by the server.
#
#    It also sees the client's Request event.
#
#    And sometimes, server events are annotated with a _SWITCH_* event. For
#    example, we can have a (Response, _SWITCH_CONNECT) event, which is
#    different from a regular Response event.
#
# 3) The keep-alive machine sees the process_keep_alive_disabled() event
#    (which is derived from Request/Response events), and this event
#    transitions it from True -> False, or from False -> False. There's no
#    way to transition back.
#
# 4&5) The _SWITCH_* machines transition from False -> True when we get a
#    Request that proposes the relevant type of switch (via
#    process_client_switch_proposal), and they go from True -> False when we
#    get a Response that has no _SWITCH_* annotation.
#
# So that's event-triggered transitions.
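#
# As a concrete sketch of what an annotated server event looks like in code
# (the variable name `state` here is purely illustrative), a call like
#
#     state.process_event(SERVER, Response, _SWITCH_CONNECT)
#
# is looked up in the transition table under the key
# (Response, _SWITCH_CONNECT) rather than under plain Response.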
#
# State-triggered transitions are less standard. What they do here is couple
# the machines together. The way this works is, when certain *joint*
# configurations of states are achieved, then we automatically transition to a
# new *joint* state. So, for example, if we're ever in a joint state with
#
#   client: DONE
#   keep-alive: False
#
# then the client state immediately transitions to:
#
#   client: MUST_CLOSE
#
# This is fundamentally different from an event-based transition, because it
# doesn't matter how we arrived at the {client: DONE, keep-alive: False} state
# -- maybe the client transitioned SEND_BODY -> DONE, or keep-alive
# transitioned True -> False. Either way, once this precondition is satisfied,
# this transition is immediately triggered.
#
# What if two conflicting state-based transitions get enabled at the same
# time? In practice there's only one case where this arises (client DONE ->
# MIGHT_SWITCH_PROTOCOL versus DONE -> MUST_CLOSE), and we resolve it by
# explicitly prioritizing the DONE -> MIGHT_SWITCH_PROTOCOL transition.
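#
# (Concretely, this priority falls out of ordering in
# _fire_state_triggered_transitions below: the pending-switch check runs
# first and moves the client out of DONE, before the rules that would send
# DONE to MUST_CLOSE get a chance to match.)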
#
# Implementation
# --------------
#
# The event-triggered transitions for the server and client machines are all
# stored explicitly in a table. Ditto for the state-triggered transitions that
# involve just the server and client state.
#
# The transitions for the other machines, and the state-triggered transitions
# that involve the other machines, are written out as explicit Python code.
#
# It'd be nice if there were some cleaner way to do all this. This isn't
# *too* terrible, but I feel like it could probably be better.
#
# WARNING
# -------
#
# The script that generates the state machine diagrams for the docs knows how
# to read out the EVENT_TRIGGERED_TRANSITIONS and STATE_TRIGGERED_TRANSITIONS
# tables. But it can't automatically read the transitions that are written
# directly in Python code. So if you touch those, you need to also update the
# script to keep it in sync!

from typing import cast, Dict, Optional, Set, Tuple, Type, Union

from ._events import *
from ._util import LocalProtocolError, Sentinel

# Everything in __all__ gets re-exported as part of the h11 public API.
__all__ = [
    "CLIENT",
    "SERVER",
    "IDLE",
    "SEND_RESPONSE",
    "SEND_BODY",
    "DONE",
    "MUST_CLOSE",
    "CLOSED",
    "MIGHT_SWITCH_PROTOCOL",
    "SWITCHED_PROTOCOL",
    "ERROR",
]

class CLIENT(Sentinel, metaclass=Sentinel):
    pass


class SERVER(Sentinel, metaclass=Sentinel):
    pass


# States
class IDLE(Sentinel, metaclass=Sentinel):
    pass


class SEND_RESPONSE(Sentinel, metaclass=Sentinel):
    pass


class SEND_BODY(Sentinel, metaclass=Sentinel):
    pass


class DONE(Sentinel, metaclass=Sentinel):
    pass


class MUST_CLOSE(Sentinel, metaclass=Sentinel):
    pass


class CLOSED(Sentinel, metaclass=Sentinel):
    pass


class ERROR(Sentinel, metaclass=Sentinel):
    pass


# Switch types
class MIGHT_SWITCH_PROTOCOL(Sentinel, metaclass=Sentinel):
    pass


class SWITCHED_PROTOCOL(Sentinel, metaclass=Sentinel):
    pass


class _SWITCH_UPGRADE(Sentinel, metaclass=Sentinel):
    pass


class _SWITCH_CONNECT(Sentinel, metaclass=Sentinel):
    pass

EventTransitionType = Dict[
    Type[Sentinel],
    Dict[
        Type[Sentinel],
        Dict[Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]], Type[Sentinel]],
    ],
]

EVENT_TRIGGERED_TRANSITIONS: EventTransitionType = {
    CLIENT: {
        IDLE: {Request: SEND_BODY, ConnectionClosed: CLOSED},
        SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE},
        DONE: {ConnectionClosed: CLOSED},
        MUST_CLOSE: {ConnectionClosed: CLOSED},
        CLOSED: {ConnectionClosed: CLOSED},
        MIGHT_SWITCH_PROTOCOL: {},
        SWITCHED_PROTOCOL: {},
        ERROR: {},
    },
    SERVER: {
        IDLE: {
            ConnectionClosed: CLOSED,
            Response: SEND_BODY,
            # Special case: server sees client Request events, in this form
            (Request, CLIENT): SEND_RESPONSE,
        },
        SEND_RESPONSE: {
            InformationalResponse: SEND_RESPONSE,
            Response: SEND_BODY,
            (InformationalResponse, _SWITCH_UPGRADE): SWITCHED_PROTOCOL,
            (Response, _SWITCH_CONNECT): SWITCHED_PROTOCOL,
        },
        SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE},
        DONE: {ConnectionClosed: CLOSED},
        MUST_CLOSE: {ConnectionClosed: CLOSED},
        CLOSED: {ConnectionClosed: CLOSED},
        SWITCHED_PROTOCOL: {},
        ERROR: {},
    },
}
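
# A few illustrative reads of the table above (a sketch for exposition, not
# code that runs): a client Request in IDLE moves the client to SEND_BODY,
# the mirrored (Request, CLIENT) key moves the server from IDLE to
# SEND_RESPONSE, and an annotated (Response, _SWITCH_CONNECT) in
# SEND_RESPONSE jumps the server straight to SWITCHED_PROTOCOL:
#
#     EVENT_TRIGGERED_TRANSITIONS[CLIENT][IDLE][Request] is SEND_BODY
#     EVENT_TRIGGERED_TRANSITIONS[SERVER][IDLE][(Request, CLIENT)] is SEND_RESPONSE
#     EVENT_TRIGGERED_TRANSITIONS[SERVER][SEND_RESPONSE][
#         (Response, _SWITCH_CONNECT)
#     ] is SWITCHED_PROTOCOL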

StateTransitionType = Dict[
    Tuple[Type[Sentinel], Type[Sentinel]], Dict[Type[Sentinel], Type[Sentinel]]
]

# NB: there are also some special-case state-triggered transitions hard-coded
# into _fire_state_triggered_transitions below.
STATE_TRIGGERED_TRANSITIONS: StateTransitionType = {
    # (Client state, Server state) -> new states
    # Protocol negotiation
    (MIGHT_SWITCH_PROTOCOL, SWITCHED_PROTOCOL): {CLIENT: SWITCHED_PROTOCOL},
    # Socket shutdown
    (CLOSED, DONE): {SERVER: MUST_CLOSE},
    (CLOSED, IDLE): {SERVER: MUST_CLOSE},
    (ERROR, DONE): {SERVER: MUST_CLOSE},
    (DONE, CLOSED): {CLIENT: MUST_CLOSE},
    (IDLE, CLOSED): {CLIENT: MUST_CLOSE},
    (DONE, ERROR): {CLIENT: MUST_CLOSE},
}
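
# Reading the table above (again, just a sketch): if the joint state is
# (CLIENT: DONE, SERVER: CLOSED), then
#
#     STATE_TRIGGERED_TRANSITIONS[(DONE, CLOSED)] == {CLIENT: MUST_CLOSE}
#
# gets merged into ConnectionState.states below, moving the client to
# MUST_CLOSE while the server stays CLOSED.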


class ConnectionState:
    def __init__(self) -> None:
        # Extra bits of state that don't quite fit into the state model.

        # If this is False then it enables the automatic DONE -> MUST_CLOSE
        # transition. Don't set this directly; call
        # .process_keep_alive_disabled().
        self.keep_alive = True

        # This is a subset of {_SWITCH_UPGRADE, _SWITCH_CONNECT}, containing
        # the proposals made by the client for switching protocols.
        self.pending_switch_proposals: Set[Type[Sentinel]] = set()

        self.states: Dict[Type[Sentinel], Type[Sentinel]] = {CLIENT: IDLE, SERVER: IDLE}

    def process_error(self, role: Type[Sentinel]) -> None:
        self.states[role] = ERROR
        self._fire_state_triggered_transitions()

    def process_keep_alive_disabled(self) -> None:
        self.keep_alive = False
        self._fire_state_triggered_transitions()

    def process_client_switch_proposal(self, switch_event: Type[Sentinel]) -> None:
        self.pending_switch_proposals.add(switch_event)
        self._fire_state_triggered_transitions()

    def process_event(
        self,
        role: Type[Sentinel],
        event_type: Type[Event],
        server_switch_event: Optional[Type[Sentinel]] = None,
    ) -> None:
        _event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]] = event_type
        if server_switch_event is not None:
            assert role is SERVER
            if server_switch_event not in self.pending_switch_proposals:
                raise LocalProtocolError(
                    "Received server {} event without a pending proposal".format(
                        server_switch_event
                    )
                )
            _event_type = (event_type, server_switch_event)
        if server_switch_event is None and _event_type is Response:
            self.pending_switch_proposals = set()
        self._fire_event_triggered_transitions(role, _event_type)
        # Special case: the server state does get to see Request
        # events.
        if _event_type is Request:
            assert role is CLIENT
            self._fire_event_triggered_transitions(SERVER, (Request, CLIENT))
        self._fire_state_triggered_transitions()

    def _fire_event_triggered_transitions(
        self,
        role: Type[Sentinel],
        event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]],
    ) -> None:
        state = self.states[role]
        try:
            new_state = EVENT_TRIGGERED_TRANSITIONS[role][state][event_type]
        except KeyError:
            event_type = cast(Type[Event], event_type)
            raise LocalProtocolError(
                "can't handle event type {} when role={} and state={}".format(
                    event_type.__name__, role, self.states[role]
                )
            ) from None
        self.states[role] = new_state

    def _fire_state_triggered_transitions(self) -> None:
        # We apply these rules repeatedly until converging on a fixed point
        while True:
            start_states = dict(self.states)

            # It could happen that both these special-case transitions are
            # enabled at the same time:
            #
            #   DONE -> MIGHT_SWITCH_PROTOCOL
            #   DONE -> MUST_CLOSE
            #
            # For example, this will always be true of an HTTP/1.0 client
            # requesting CONNECT. If this happens, the protocol switch takes
            # priority. From there the client will either go to
            # SWITCHED_PROTOCOL, in which case it's none of our business when
            # they close the connection, or else the server will deny the
            # request, in which case the client will go back to DONE and then
            # from there to MUST_CLOSE.
            if self.pending_switch_proposals:
                if self.states[CLIENT] is DONE:
                    self.states[CLIENT] = MIGHT_SWITCH_PROTOCOL

            if not self.pending_switch_proposals:
                if self.states[CLIENT] is MIGHT_SWITCH_PROTOCOL:
                    self.states[CLIENT] = DONE

            if not self.keep_alive:
                for role in (CLIENT, SERVER):
                    if self.states[role] is DONE:
                        self.states[role] = MUST_CLOSE

            # Tabular state-triggered transitions
            joint_state = (self.states[CLIENT], self.states[SERVER])
            changes = STATE_TRIGGERED_TRANSITIONS.get(joint_state, {})
            self.states.update(changes)

            if self.states == start_states:
                # Fixed point reached
                return

    def start_next_cycle(self) -> None:
        if self.states != {CLIENT: DONE, SERVER: DONE}:
            raise LocalProtocolError(
                "not in a reusable state. self.states={}".format(self.states)
            )
        # Can't reach DONE/DONE with any of these active, but still, let's be
        # sure.
        assert self.keep_alive
        assert not self.pending_switch_proposals
        self.states = {CLIENT: IDLE, SERVER: IDLE}
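

# An illustrative walkthrough of ConnectionState: a sketch for exposition, not
# part of the h11 API. The variable names (cs, cs2, cs3) are arbitrary, and
# this only runs when the module is executed directly.
if __name__ == "__main__":
    # One ordinary keep-alive request/response cycle.
    cs = ConnectionState()
    cs.process_event(CLIENT, Request)
    cs.process_event(CLIENT, EndOfMessage)
    assert cs.states == {CLIENT: DONE, SERVER: SEND_RESPONSE}
    cs.process_event(SERVER, Response)
    cs.process_event(SERVER, EndOfMessage)
    assert cs.states == {CLIENT: DONE, SERVER: DONE}
    cs.start_next_cycle()
    assert cs.states == {CLIENT: IDLE, SERVER: IDLE}

    # Disabling keep-alive triggers the DONE -> MUST_CLOSE coupling.
    cs2 = ConnectionState()
    cs2.process_event(CLIENT, Request)
    cs2.process_keep_alive_disabled()
    cs2.process_event(CLIENT, EndOfMessage)
    assert cs2.states[CLIENT] is MUST_CLOSE

    # A successful CONNECT switch: the pending proposal parks the client in
    # MIGHT_SWITCH_PROTOCOL until the server's annotated Response arrives.
    cs3 = ConnectionState()
    cs3.process_client_switch_proposal(_SWITCH_CONNECT)
    cs3.process_event(CLIENT, Request)
    cs3.process_event(CLIENT, EndOfMessage)
    assert cs3.states[CLIENT] is MIGHT_SWITCH_PROTOCOL
    cs3.process_event(SERVER, Response, _SWITCH_CONNECT)
    assert cs3.states == {CLIENT: SWITCHED_PROTOCOL, SERVER: SWITCHED_PROTOCOL}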