from __future__ import annotations
import logging
import os
import shutil
import sys
import tempfile
from email.message import Message
from enum import IntEnum
from io import BufferedRandom, BytesIO
from numbers import Number
from typing import TYPE_CHECKING, cast
from .decoders import Base64Decoder, QuotedPrintableDecoder
from .exceptions import FileError, FormParserError, MultipartParseError, QuerystringParseError
if TYPE_CHECKING: # pragma: no cover
from typing import Any, Callable, Literal, Protocol, TypedDict
from typing_extensions import TypeAlias
class SupportsRead(Protocol):
def read(self, __n: int) -> bytes: ...
class QuerystringCallbacks(TypedDict, total=False):
on_field_start: Callable[[], None]
on_field_name: Callable[[bytes, int, int], None]
on_field_data: Callable[[bytes, int, int], None]
on_field_end: Callable[[], None]
on_end: Callable[[], None]
class OctetStreamCallbacks(TypedDict, total=False):
on_start: Callable[[], None]
on_data: Callable[[bytes, int, int], None]
on_end: Callable[[], None]
class MultipartCallbacks(TypedDict, total=False):
on_part_begin: Callable[[], None]
on_part_data: Callable[[bytes, int, int], None]
on_part_end: Callable[[], None]
on_header_begin: Callable[[], None]
on_header_field: Callable[[bytes, int, int], None]
on_header_value: Callable[[bytes, int, int], None]
on_header_end: Callable[[], None]
on_headers_finished: Callable[[], None]
on_end: Callable[[], None]
class FormParserConfig(TypedDict):
UPLOAD_DIR: str | None
UPLOAD_KEEP_FILENAME: bool
UPLOAD_KEEP_EXTENSIONS: bool
UPLOAD_ERROR_ON_BAD_CTE: bool
MAX_MEMORY_FILE_SIZE: int
MAX_BODY_SIZE: float
class FileConfig(TypedDict, total=False):
UPLOAD_DIR: str | bytes | None
UPLOAD_DELETE_TMP: bool
UPLOAD_KEEP_FILENAME: bool
UPLOAD_KEEP_EXTENSIONS: bool
MAX_MEMORY_FILE_SIZE: int
class _FormProtocol(Protocol):
def write(self, data: bytes) -> int: ...
def finalize(self) -> None: ...
def close(self) -> None: ...
class FieldProtocol(_FormProtocol, Protocol):
def __init__(self, name: bytes | None) -> None: ...
def set_none(self) -> None: ...
class FileProtocol(_FormProtocol, Protocol):
def __init__(self, file_name: bytes | None, field_name: bytes | None, config: FileConfig) -> None: ...
OnFieldCallback = Callable[[FieldProtocol], None]
OnFileCallback = Callable[[FileProtocol], None]
CallbackName: TypeAlias = Literal[
"start",
"data",
"end",
"field_start",
"field_name",
"field_data",
"field_end",
"part_begin",
"part_data",
"part_end",
"header_begin",
"header_field",
"header_value",
"header_end",
"headers_finished",
]
# Unique missing object.
_missing = object()
class QuerystringState(IntEnum):
"""Querystring parser states.
These are used to keep track of the state of the parser, and are used to determine
what to do when new data is encountered.
"""
BEFORE_FIELD = 0
FIELD_NAME = 1
FIELD_DATA = 2
class MultipartState(IntEnum):
"""Multipart parser states.
These are used to keep track of the state of the parser, and are used to determine
what to do when new data is encountered.
"""
START = 0
START_BOUNDARY = 1
HEADER_FIELD_START = 2
HEADER_FIELD = 3
HEADER_VALUE_START = 4
HEADER_VALUE = 5
HEADER_VALUE_ALMOST_DONE = 6
HEADERS_ALMOST_DONE = 7
PART_DATA_START = 8
PART_DATA = 9
PART_DATA_END = 10
END = 11
# Flags for the multipart parser.
FLAG_PART_BOUNDARY = 1
FLAG_LAST_BOUNDARY = 2
# Byte constants. Indexing a bytes object yields an integer rather than a
# 1-length string, so we save the integer values of the characters we compare
# against.
CR = b"\r"[0]
LF = b"\n"[0]
COLON = b":"[0]
SPACE = b" "[0]
HYPHEN = b"-"[0]
AMPERSAND = b"&"[0]
SEMICOLON = b";"[0]
LOWER_A = b"a"[0]
LOWER_Z = b"z"[0]
NULL = b"\x00"[0]
# fmt: off
# Mask for ASCII characters that can be http tokens.
# Per RFC7230 - 3.2.6, this is all alpha-numeric characters
# and these: !#$%&'*+-.^_`|~
TOKEN_CHARS_SET = frozenset(
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"abcdefghijklmnopqrstuvwxyz"
b"0123456789"
b"!#$%&'*+-.^_`|~")
# fmt: on
def parse_options_header(value: str | bytes | None) -> tuple[bytes, dict[bytes, bytes]]:
"""Parses a Content-Type header into a value in the following format: (content_type, {parameters})."""
# Uses email.message.Message to parse the header as described in PEP 594.
# Ref: https://peps.python.org/pep-0594/#cgi
if not value:
return (b"", {})
# If we are passed bytes, we assume that it conforms to WSGI, encoding in latin-1.
if isinstance(value, bytes): # pragma: no cover
value = value.decode("latin-1")
# For types
assert isinstance(value, str), "Value should be a string by now"
# If we have no options, return the string as-is.
if ";" not in value:
return (value.lower().strip().encode("latin-1"), {})
# Split at the first semicolon, to get our value and then options.
# ctype, rest = value.split(b';', 1)
message = Message()
message["content-type"] = value
params = message.get_params()
# If there were no parameters, this would have already returned above
assert params, "At least the content type value should be present"
ctype = params.pop(0)[0].encode("latin-1")
options: dict[bytes, bytes] = {}
for param in params:
key, value = param
# If the value returned from get_params() is a 3-tuple, the last
# element corresponds to the value.
# See: https://docs.python.org/3/library/email.compat32-message.html
if isinstance(value, tuple):
value = value[-1]
# If the value is a filename, we need to fix a bug on IE6 that sends
# the full file path instead of the filename.
if key == "filename":
if value[1:3] == ":\\" or value[:2] == "\\\\":
value = value.split("\\")[-1]
options[key.encode("latin-1")] = value.encode("latin-1")
return ctype, options
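# A minimal usage sketch (illustrative helper, not called by the parser machinery):
# how parse_options_header splits a raw Content-Type value into the media type and
# its parameters.
def _example_parse_options_header() -> None:  # pragma: no cover - example only
    ctype, params = parse_options_header('multipart/form-data; boundary="abc123"')
    assert ctype == b"multipart/form-data"
    assert params[b"boundary"] == b"abc123"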
class Field:
"""A Field object represents a (parsed) form field. It represents a single
field with a corresponding name and value.
The name that a :class:`Field` will be instantiated with is the same name
that would be found in the following HTML::
<input name="name_goes_here" type="text"/>
This class defines two methods, :meth:`on_data` and :meth:`on_end`, that
will be called when data is written to the Field, and when the Field is
finalized, respectively.
Args:
name: The name of the form field.
"""
def __init__(self, name: bytes | None) -> None:
self._name = name
self._value: list[bytes] = []
# We cache the joined version of _value for speed.
self._cache = _missing
@classmethod
def from_value(cls, name: bytes, value: bytes | None) -> Field:
"""Create an instance of a :class:`Field`, and set the corresponding
value - either None or an actual value. This method will also
finalize the Field itself.
Args:
name: the name of the form field.
value: the value of the form field - either a bytestring or None.
Returns:
A new instance of a [`Field`][multipart.Field].
"""
f = cls(name)
if value is None:
f.set_none()
else:
f.write(value)
f.finalize()
return f
def write(self, data: bytes) -> int:
"""Write some data into the form field.
Args:
data: The data to write to the field.
Returns:
The number of bytes written.
"""
return self.on_data(data)
def on_data(self, data: bytes) -> int:
"""This method is a callback that will be called whenever data is
written to the Field.
Args:
data: The data to write to the field.
Returns:
The number of bytes written.
"""
self._value.append(data)
self._cache = _missing
return len(data)
def on_end(self) -> None:
"""This method is called whenever the Field is finalized."""
if self._cache is _missing:
self._cache = b"".join(self._value)
def finalize(self) -> None:
"""Finalize the form field."""
self.on_end()
def close(self) -> None:
"""Close the Field object. This will free any underlying cache."""
# Free our value array.
if self._cache is _missing:
self._cache = b"".join(self._value)
del self._value
def set_none(self) -> None:
"""Some fields in a querystring can possibly have a value of None - for
example, the string "foo&bar=&baz=asdf" will have a field with the
name "foo" and value None, one with name "bar" and value "", and one
with name "baz" and value "asdf". Since the write() interface doesn't
support writing None, this function will set the field value to None.
"""
self._cache = None
@property
def field_name(self) -> bytes | None:
"""This property returns the name of the field."""
return self._name
@property
def value(self) -> bytes | None:
"""This property returns the value of the form field."""
if self._cache is _missing:
self._cache = b"".join(self._value)
assert isinstance(self._cache, bytes) or self._cache is None
return self._cache
def __eq__(self, other: object) -> bool:
if isinstance(other, Field):
return self.field_name == other.field_name and self.value == other.value
else:
return NotImplemented
def __repr__(self) -> str:
if self.value is not None and len(self.value) > 97:
# We get the repr, and then insert three dots before the final
# quote.
v = repr(self.value[:97])[:-1] + "...'"
else:
v = repr(self.value)
return "{}(field_name={!r}, value={})".format(self.__class__.__name__, self.field_name, v)
class File:
"""This class represents an uploaded file. It handles writing file data to
either an in-memory file or a temporary file on-disk, if the optional
threshold is passed.
There are some options that can be passed to the File to change behavior
of the class. Valid options are as follows:
| Name | Type | Default | Description |
|-----------------------|-------|---------|-------------|
| UPLOAD_DIR | `str` | None | The directory to store uploaded files in. If this is None, a temporary file will be created in the system's standard location. |
| UPLOAD_DELETE_TMP | `bool`| True | Delete automatically created TMP file |
| UPLOAD_KEEP_FILENAME | `bool`| False | Whether or not to keep the filename of the uploaded file. If True, then the filename will be converted to a safe representation (e.g. by removing any invalid path segments), and then saved with the same name. Otherwise, a temporary name will be used. |
| UPLOAD_KEEP_EXTENSIONS| `bool`| False | Whether or not to keep the uploaded file's extension. If False, the file will be saved with the default temporary extension (usually ".tmp"). Otherwise, the file's extension will be maintained. Note that this will properly combine with the UPLOAD_KEEP_FILENAME setting. |
| MAX_MEMORY_FILE_SIZE | `int` | 1 MiB | The maximum number of bytes of a File to keep in memory. By default, the contents of a File are kept in memory until a certain limit is reached, after which the contents of the File are written to a temporary file. This behavior can be disabled by setting this value to an appropriately large value (or, for example, infinity, such as `float('inf')`). |
Args:
file_name: The name of the file that this [`File`][multipart.File] represents.
field_name: The name of the form field that this file was uploaded with. This can be None, if, for example,
the file was uploaded with Content-Type application/octet-stream.
config: The configuration for this File. See above for valid configuration keys and their corresponding values.
""" # noqa: E501
def __init__(self, file_name: bytes | None, field_name: bytes | None = None, config: FileConfig = {}) -> None:
# Save configuration, set other variables default.
self.logger = logging.getLogger(__name__)
self._config = config
self._in_memory = True
self._bytes_written = 0
self._fileobj: BytesIO | BufferedRandom = BytesIO()
# Save the provided field/file name.
self._field_name = field_name
self._file_name = file_name
# Our actual file name is None by default, since, depending on our
# config, we may not actually use the provided name.
self._actual_file_name: bytes | None = None
# Split the extension from the filename.
if file_name is not None:
base, ext = os.path.splitext(file_name)
self._file_base = base
self._ext = ext
@property
def field_name(self) -> bytes | None:
"""The form field associated with this file. May be None if there isn't
one, for example when we have an application/octet-stream upload.
"""
return self._field_name
@property
def file_name(self) -> bytes | None:
"""The file name given in the upload request."""
return self._file_name
@property
def actual_file_name(self) -> bytes | None:
"""The file name that this file is saved as. Will be None if it's not
currently saved on disk.
"""
return self._actual_file_name
@property
def file_object(self) -> BytesIO | BufferedRandom:
"""The file object that we're currently writing to. Note that this
will either be an instance of a :class:`io.BytesIO`, or a regular file
object.
"""
return self._fileobj
@property
def size(self) -> int:
"""The total size of this file, counted as the number of bytes that
currently have been written to the file.
"""
return self._bytes_written
@property
def in_memory(self) -> bool:
"""A boolean representing whether or not this file object is currently
stored in-memory or on-disk.
"""
return self._in_memory
def flush_to_disk(self) -> None:
"""If the file is already on-disk, do nothing. Otherwise, copy from
the in-memory buffer to a disk file, and then reassign our internal
file object to this new disk file.
Note that if you attempt to flush a file that is already on-disk, a
warning will be logged to this module's logger.
"""
if not self._in_memory:
self.logger.warning("Trying to flush to disk when we're not in memory")
return
# Go back to the start of our file.
self._fileobj.seek(0)
# Open a new file.
new_file = self._get_disk_file()
# Copy the file objects.
shutil.copyfileobj(self._fileobj, new_file)
# Seek to the new position in our new file.
new_file.seek(self._bytes_written)
# Reassign the fileobject.
old_fileobj = self._fileobj
self._fileobj = new_file
# We're no longer in memory.
self._in_memory = False
# Close the old file object.
old_fileobj.close()
def _get_disk_file(self) -> BufferedRandom:
"""This function is responsible for getting a file object on-disk for us."""
self.logger.info("Opening a file on disk")
file_dir = self._config.get("UPLOAD_DIR")
keep_filename = self._config.get("UPLOAD_KEEP_FILENAME", False)
keep_extensions = self._config.get("UPLOAD_KEEP_EXTENSIONS", False)
delete_tmp = self._config.get("UPLOAD_DELETE_TMP", True)
tmp_file: None | BufferedRandom = None
# If we have a directory and are to keep the filename...
if file_dir is not None and keep_filename:
self.logger.info("Saving with filename in: %r", file_dir)
# Build our filename.
# TODO: what happens if we don't have a filename?
fname = self._file_base + self._ext if keep_extensions else self._file_base
path = os.path.join(file_dir, fname) # type: ignore[arg-type]
try:
self.logger.info("Opening file: %r", path)
tmp_file = open(path, "w+b")
except OSError:
tmp_file = None
self.logger.exception("Error opening temporary file")
raise FileError("Error opening temporary file: %r" % path)
else:
# Build the options for the temporary file.
# tempfile is given str arguments here, so decode any bytes paths/suffixes
# using the default filesystem encoding.
suffix = self._ext.decode(sys.getfilesystemencoding()) if keep_extensions else None
if file_dir is None:
dir = None
elif isinstance(file_dir, bytes):
dir = file_dir.decode(sys.getfilesystemencoding())
else:
dir = file_dir # pragma: no cover
# Create a temporary (named) file with the appropriate settings.
self.logger.info(
"Creating a temporary file with options: %r", {"suffix": suffix, "delete": delete_tmp, "dir": dir}
)
try:
tmp_file = cast(BufferedRandom, tempfile.NamedTemporaryFile(suffix=suffix, delete=delete_tmp, dir=dir))
except OSError:
self.logger.exception("Error creating named temporary file")
raise FileError("Error creating named temporary file")
assert tmp_file is not None
# Encode filename as bytes.
if isinstance(tmp_file.name, str):
fname = tmp_file.name.encode(sys.getfilesystemencoding())
else:
fname = cast(bytes, tmp_file.name) # pragma: no cover
self._actual_file_name = fname
return tmp_file
def write(self, data: bytes) -> int:
"""Write some data to the File.
:param data: a bytestring
"""
return self.on_data(data)
def on_data(self, data: bytes) -> int:
"""This method is a callback that will be called whenever data is
written to the File.
Args:
data: The data to write to the file.
Returns:
The number of bytes written.
"""
bwritten = self._fileobj.write(data)
# If the number of bytes written doesn't match the length of the data, log a warning and return early.
if bwritten != len(data):
self.logger.warning("bwritten != len(data) (%d != %d)", bwritten, len(data))
return bwritten
# Keep track of how many bytes we've written.
self._bytes_written += bwritten
# If we're in-memory and are over our limit, we create a file.
max_memory_file_size = self._config.get("MAX_MEMORY_FILE_SIZE")
if self._in_memory and max_memory_file_size is not None and (self._bytes_written > max_memory_file_size):
self.logger.info("Flushing to disk")
self.flush_to_disk()
# Return the number of bytes written.
return bwritten
def on_end(self) -> None:
"""This method is called whenever the Field is finalized."""
# Flush the underlying file object
self._fileobj.flush()
def finalize(self) -> None:
"""Finalize the form file. This will not close the underlying file,
but simply signal that we are finished writing to the File.
"""
self.on_end()
def close(self) -> None:
"""Close the File object. This will actually close the underlying
file object (whether it's a :class:`io.BytesIO` or an actual file
object).
"""
self._fileobj.close()
def __repr__(self) -> str:
return "{}(file_name={!r}, field_name={!r})".format(self.__class__.__name__, self.file_name, self.field_name)
class BaseParser:
"""This class is the base class for all parsers. It contains the logic for
calling and adding callbacks.
A callback can be one of two different forms. "Notification callbacks" are
callbacks that are called when something happens - for example, when a new
part of a multipart message is encountered by the parser. "Data callbacks"
are called when we get some sort of data - for example, part of the body of
a multipart chunk. Notification callbacks are called with no parameters,
whereas data callbacks are called with three, as follows::
data_callback(data, start, end)
The "data" parameter is a bytestring (i.e. "foo" on Python 2, or b"foo" on
Python 3). "start" and "end" are integer indexes into the "data" string
that represent the data of interest. Thus, in a data callback, the slice
`data[start:end]` represents the data that the callback is "interested in".
The callback is not passed a copy of the data, since copying severely hurts
performance.
"""
def __init__(self) -> None:
self.logger = logging.getLogger(__name__)
self.callbacks: QuerystringCallbacks | OctetStreamCallbacks | MultipartCallbacks = {}
def callback(
self, name: CallbackName, data: bytes | None = None, start: int | None = None, end: int | None = None
) -> None:
"""This function calls a provided callback with some data. If the
callback is not set, will do nothing.
Args:
name: The name of the callback to call (as a string).
data: Data to pass to the callback. If None, then it is assumed that the callback is a notification
callback, and no parameters are given.
start: The start offset within the data, passed to the data callback.
end: The end offset within the data, passed to the data callback.
"""
on_name = "on_" + name
func = self.callbacks.get(on_name)
if func is None:
return
func = cast("Callable[..., Any]", func)
# Depending on whether we're given a buffer...
if data is not None:
# Don't do anything if we have start == end.
if start is not None and start == end:
return
self.logger.debug("Calling %s with data[%d:%d]", on_name, start, end)
func(data, start, end)
else:
self.logger.debug("Calling %s with no data", on_name)
func()
def set_callback(self, name: CallbackName, new_func: Callable[..., Any] | None) -> None:
"""Update the function for a callback. Removes from the callbacks dict
if new_func is None.
:param name: The name of the callback to call (as a string).
:param new_func: The new function for the callback. If None, then the
callback will be removed (with no error if it does not
exist).
"""
if new_func is None:
self.callbacks.pop("on_" + name, None) # type: ignore[misc]
else:
self.callbacks["on_" + name] = new_func # type: ignore[literal-required]
def close(self) -> None:
pass # pragma: no cover
def finalize(self) -> None:
pass # pragma: no cover
def __repr__(self) -> str:
return "%s()" % self.__class__.__name__
class OctetStreamParser(BaseParser):
"""This parser parses an octet-stream request body and calls callbacks when
incoming data is received. Callbacks are as follows:
| Callback Name | Parameters | Description |
|----------------|-----------------|-----------------------------------------------------|
| on_start | None | Called when the first data is parsed. |
| on_data | data, start, end| Called for each data chunk that is parsed. |
| on_end | None | Called when the parser is finished parsing all data.|
Args:
callbacks: A dictionary of callbacks. See the documentation for [`BaseParser`][multipart.BaseParser].
max_size: The maximum size of body to parse. Defaults to infinity - i.e. unbounded.
"""
def __init__(self, callbacks: OctetStreamCallbacks = {}, max_size: float = float("inf")):
super().__init__()
self.callbacks = callbacks
self._started = False
if not isinstance(max_size, Number) or max_size < 1:
raise ValueError("max_size must be a positive number, not %r" % max_size)
self.max_size: int | float = max_size
self._current_size = 0
def write(self, data: bytes) -> int:
"""Write some data to the parser, which will perform size verification,
and then pass the data to the underlying callback.
Args:
data: The data to write to the parser.
Returns:
The number of bytes written.
"""
if not self._started:
self.callback("start")
self._started = True
# Truncate data length.
data_len = len(data)
if (self._current_size + data_len) > self.max_size:
# We truncate the length of data that we are to process.
new_size = int(self.max_size - self._current_size)
self.logger.warning(
"Current size is %d (max %d), so truncating data length from %d to %d",
self._current_size,
self.max_size,
data_len,
new_size,
)
data_len = new_size
# Increment size, then callback, in case there's an exception.
self._current_size += data_len
self.callback("data", data, 0, data_len)
return data_len
def finalize(self) -> None:
"""Finalize this parser, which signals to that we are finished parsing,
and sends the on_end callback.
"""
self.callback("end")
def __repr__(self) -> str:
return "%s()" % self.__class__.__name__
class QuerystringParser(BaseParser):
"""This is a streaming querystring parser. It will consume data, and call
the callbacks given when it has data.
| Callback Name | Parameters | Description |
|----------------|-----------------|-----------------------------------------------------|
| on_field_start | None | Called when a new field is encountered. |
| on_field_name | data, start, end| Called when a portion of a field's name is encountered. |
| on_field_data | data, start, end| Called when a portion of a field's data is encountered. |
| on_field_end | None | Called when the end of a field is encountered. |
| on_end | None | Called when the parser is finished parsing all data.|
Args:
callbacks: A dictionary of callbacks. See the documentation for [`BaseParser`][multipart.BaseParser].
strict_parsing: Whether or not to parse the body strictly. Defaults to False. If this is set to True, then the
behavior of the parser changes as follows: a field with an equals sign (e.g. "foo=bar" or "foo=") is always
included, while a field with no equals sign (e.g. "...&name&...") is treated as an error. If an error is
encountered, a [`QuerystringParseError`][multipart.exceptions.QuerystringParseError] will be raised.
max_size: The maximum size of body to parse. Defaults to infinity - i.e. unbounded.
""" # noqa: E501
state: QuerystringState
def __init__(
self, callbacks: QuerystringCallbacks = {}, strict_parsing: bool = False, max_size: float = float("inf")
) -> None:
super().__init__()
self.state = QuerystringState.BEFORE_FIELD
self._found_sep = False
self.callbacks = callbacks
# Max-size stuff
if not isinstance(max_size, Number) or max_size < 1:
raise ValueError("max_size must be a positive number, not %r" % max_size)
self.max_size: int | float = max_size
self._current_size = 0
# Should parsing be strict?
self.strict_parsing = strict_parsing
def write(self, data: bytes) -> int:
"""Write some data to the parser, which will perform size verification,
parse into either a field name or value, and then pass the
corresponding data to the underlying callback. If an error is
encountered while parsing, a QuerystringParseError will be raised. The
"offset" attribute of the raised exception will be set to the offset in
the input data chunk (NOT the overall stream) that caused the error.
Args:
data: The data to write to the parser.
Returns:
The number of bytes written.
"""
# Handle sizing.
data_len = len(data)
if (self._current_size + data_len) > self.max_size:
# We truncate the length of data that we are to process.
new_size = int(self.max_size - self._current_size)
self.logger.warning(
"Current size is %d (max %d), so truncating data length from %d to %d",
self._current_size,
self.max_size,
data_len,
new_size,
)
data_len = new_size
l = 0
try:
l = self._internal_write(data, data_len)
finally:
self._current_size += l
return l
def _internal_write(self, data: bytes, length: int) -> int:
state = self.state
strict_parsing = self.strict_parsing
found_sep = self._found_sep
i = 0
while i < length:
ch = data[i]
# Depending on our state...
if state == QuerystringState.BEFORE_FIELD:
# If the 'found_sep' flag is set, we've already encountered
# and skipped a single separator. If so, we check our strict
# parsing flag and decide what to do. Otherwise, we haven't
# yet reached a separator, and thus, if we do, we need to skip
# it as it will be the boundary between fields that's supposed
# to be there.
if ch == AMPERSAND or ch == SEMICOLON:
if found_sep:
# If we're parsing strictly, we disallow blank chunks.
if strict_parsing:
e = QuerystringParseError("Skipping duplicate ampersand/semicolon at %d" % i)
e.offset = i
raise e
else:
self.logger.debug("Skipping duplicate ampersand/semicolon at %d", i)
else:
# This case is when we're skipping the (first)
# separator between fields, so we just set our flag
# and continue on.
found_sep = True
else:
# Emit a field-start event, and go to that state. Also,
# reset the "found_sep" flag, for the next time we get to
# this state.
self.callback("field_start")
i -= 1
state = QuerystringState.FIELD_NAME
found_sep = False
elif state == QuerystringState.FIELD_NAME:
# Try and find a separator - we ensure that, if we do, we only
# look for the equal sign before it.
sep_pos = data.find(b"&", i)
if sep_pos == -1:
sep_pos = data.find(b";", i)
# See if we can find an equals sign in the remaining data. If
# so, we can immediately emit the field name and jump to the
# data state.
if sep_pos != -1:
equals_pos = data.find(b"=", i, sep_pos)
else:
equals_pos = data.find(b"=", i)
if equals_pos != -1:
# Emit this name.
self.callback("field_name", data, i, equals_pos)
# Jump i to this position. Note that it will then have 1
# added to it below, which means the next iteration of this
# loop will inspect the character after the equals sign.
i = equals_pos
state = QuerystringState.FIELD_DATA
else:
# No equals sign found.
if not strict_parsing:
# See also comments in the QuerystringState.FIELD_DATA case below.
# If we found the separator, we emit the name and just
# end - there's no data callback at all (not even with
# a blank value).
if sep_pos != -1:
self.callback("field_name", data, i, sep_pos)
self.callback("field_end")
i = sep_pos - 1
state = QuerystringState.BEFORE_FIELD
else:
# Otherwise, no separator in this block, so the
# rest of this chunk must be a name.
self.callback("field_name", data, i, length)
i = length
else:
# We're parsing strictly. If we find a separator,
# this is an error - we require an equals sign.
if sep_pos != -1:
e = QuerystringParseError(
"When strict_parsing is True, we require an "
"equals sign in all field chunks. Did not "
"find one in the chunk that starts at %d" % (i,)
)
e.offset = i
raise e
# No separator in the rest of this chunk, so it's just
# a field name.
self.callback("field_name", data, i, length)
i = length
elif state == QuerystringState.FIELD_DATA:
# Try finding either an ampersand or a semicolon after this
# position.
sep_pos = data.find(b"&", i)
if sep_pos == -1:
sep_pos = data.find(b";", i)
# If we found it, callback this bit as data and then go back
# to expecting to find a field.
if sep_pos != -1:
self.callback("field_data", data, i, sep_pos)
self.callback("field_end")
# Note that we go to the separator, which brings us to the
# "before field" state. This allows us to properly emit
# "field_start" events only when we actually have data for
# a field of some sort.
i = sep_pos - 1
state = QuerystringState.BEFORE_FIELD
# Otherwise, emit the rest as data and finish.
else:
self.callback("field_data", data, i, length)
i = length
else: # pragma: no cover (error case)
msg = "Reached an unknown state %d at %d" % (state, i)
self.logger.warning(msg)
e = QuerystringParseError(msg)
e.offset = i
raise e
i += 1
self.state = state
self._found_sep = found_sep
return len(data)
def finalize(self) -> None:
"""Finalize this parser, which signals to that we are finished parsing,
if we're still in the middle of a field, an on_field_end callback, and
then the on_end callback.
"""
# If we're currently in the middle of a field, we finish it.
if self.state == QuerystringState.FIELD_DATA:
self.callback("field_end")
self.callback("end")
def __repr__(self) -> str:
return "{}(strict_parsing={!r}, max_size={!r})".format(
self.__class__.__name__, self.strict_parsing, self.max_size
)
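# A minimal usage sketch (illustrative helper, not called anywhere): parsing a
# querystring incrementally and reassembling name/value pairs from the streaming
# callbacks, even when the body arrives split across writes.
def _example_querystring_parser() -> None:  # pragma: no cover - example only
    fields: list[tuple[bytes, bytes]] = []
    name: list[bytes] = []
    value: list[bytes] = []
    def on_field_end() -> None:
        fields.append((b"".join(name), b"".join(value)))
        del name[:]
        del value[:]
    parser = QuerystringParser(
        callbacks={
            "on_field_name": lambda d, s, e: name.append(d[s:e]),
            "on_field_data": lambda d, s, e: value.append(d[s:e]),
            "on_field_end": on_field_end,
        }
    )
    parser.write(b"foo=bar&baz=")
    parser.write(b"qux")
    parser.finalize()
    assert fields == [(b"foo", b"bar"), (b"baz", b"qux")]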
class MultipartParser(BaseParser):
"""This class is a streaming multipart/form-data parser.
| Callback Name | Parameters | Description |
|--------------------|-----------------|-------------|
| on_part_begin | None | Called when a new part of the multipart message is encountered. |
| on_part_data | data, start, end| Called when a portion of a part's data is encountered. |
| on_part_end | None | Called when the end of a part is reached. |
| on_header_begin | None | Called when we've found a new header in a part of a multipart message |
| on_header_field | data, start, end| Called each time an additional portion of a header is read (i.e. the part of the header that is before the colon; the "Foo" in "Foo: Bar"). |
| on_header_value | data, start, end| Called when we get data for a header. |
| on_header_end | None | Called when the current header is finished - i.e. we've reached the newline at the end of the header. |
| on_headers_finished| None | Called when all headers are finished, and before the part data starts. |
| on_end | None | Called when the parser is finished parsing all data. |
Args:
boundary: The multipart boundary. This is required, and must match what is given in the HTTP request - usually in the Content-Type header.
callbacks: A dictionary of callbacks. See the documentation for [`BaseParser`][multipart.BaseParser].
max_size: The maximum size of body to parse. Defaults to infinity - i.e. unbounded.
""" # noqa: E501
def __init__(
self, boundary: bytes | str, callbacks: MultipartCallbacks = {}, max_size: float = float("inf")
) -> None:
# Initialize parser state.
super().__init__()
self.state = MultipartState.START
self.index = self.flags = 0
self.callbacks = callbacks
if not isinstance(max_size, Number) or max_size < 1:
raise ValueError("max_size must be a positive number, not %r" % max_size)
self.max_size = max_size
self._current_size = 0
# Setup marks. These are used to track the state of data received.
self.marks: dict[str, int] = {}
# Save our boundary.
if isinstance(boundary, str): # pragma: no cover
boundary = boundary.encode("latin-1")
self.boundary = b"\r\n--" + boundary
def write(self, data: bytes) -> int:
"""Write some data to the parser, which will perform size verification,
and then parse the data into the appropriate location (e.g. header,
data, etc.), and pass this on to the underlying callback. If an error
is encountered, a MultipartParseError will be raised. The "offset"
attribute on the raised exception will be set to the offset of the byte
in the input chunk that caused the error.
Args:
data: The data to write to the parser.
Returns:
The number of bytes written.
"""
# Handle sizing.
data_len = len(data)
if (self._current_size + data_len) > self.max_size:
# We truncate the length of data that we are to process.
new_size = int(self.max_size - self._current_size)
self.logger.warning(
"Current size is %d (max %d), so truncating data length from %d to %d",
self._current_size,
self.max_size,
data_len,
new_size,
)
data_len = new_size
l = 0
try:
l = self._internal_write(data, data_len)
finally:
self._current_size += l
return l
def _internal_write(self, data: bytes, length: int) -> int:
# Get values from locals.
boundary = self.boundary
# Get our state, flags and index. These are persisted between calls to
# this function.
state = self.state
index = self.index
flags = self.flags
# Our index defaults to 0.
i = 0
# Set a mark.
def set_mark(name: str) -> None:
self.marks[name] = i
# Remove a mark.
def delete_mark(name: str, reset: bool = False) -> None:
self.marks.pop(name, None)
# Helper function that makes calling a callback with data easier. The
# 'remaining' parameter will callback from the marked value until the
# end of the buffer, and reset the mark, instead of deleting it. This
# is used at the end of the function to call our callbacks with any
# remaining data in this chunk.
def data_callback(name: CallbackName, end_i: int, remaining: bool = False) -> None:
marked_index = self.marks.get(name)
if marked_index is None:
return
# Otherwise, we call it from the mark to the current byte we're
# processing.
if end_i <= marked_index:
# There is no additional data to send.
pass
elif marked_index >= 0:
# We are emitting data from the local buffer.
self.callback(name, data, marked_index, end_i)
else:
# Some of the data comes from a partial boundary match.
# and requires look-behind.
# We need to use self.flags (and not flags) because we care about
# the state when we entered the loop.
lookbehind_len = -marked_index
if lookbehind_len <= len(boundary):
self.callback(name, boundary, 0, lookbehind_len)
elif self.flags & FLAG_PART_BOUNDARY:
lookback = boundary + b"\r\n"
self.callback(name, lookback, 0, lookbehind_len)
elif self.flags & FLAG_LAST_BOUNDARY:
lookback = boundary + b"--\r\n"
self.callback(name, lookback, 0, lookbehind_len)
else: # pragma: no cover (error case)
self.logger.warning("Look-back buffer error")
if end_i > 0:
self.callback(name, data, 0, end_i)
# If we're getting remaining data, we have got all the data we
# can be certain is not a boundary, leaving only a partial boundary match.
if remaining:
self.marks[name] = end_i - length
else:
self.marks.pop(name, None)
# For each byte...
while i < length:
c = data[i]
if state == MultipartState.START:
# Skip leading newlines
if c == CR or c == LF:
i += 1
self.logger.debug("Skipping leading CR/LF at %d", i)
continue
# index is used as an index into our boundary. Set to 0.
index = 0
# Move to the next state, but decrement i so that we re-process
# this character.
state = MultipartState.START_BOUNDARY
i -= 1
elif state == MultipartState.START_BOUNDARY:
# Check to ensure that the last 2 characters in our boundary
# are CRLF.
if index == len(boundary) - 2:
if c != CR:
# Error!
msg = "Did not find CR at end of boundary (%d)" % (i,)
self.logger.warning(msg)
e = MultipartParseError(msg)
e.offset = i
raise e
index += 1
elif index == len(boundary) - 2 + 1:
if c != LF:
msg = "Did not find LF at end of boundary (%d)" % (i,)
self.logger.warning(msg)
e = MultipartParseError(msg)
e.offset = i
raise e
# The index is now used for indexing into our boundary.
index = 0
# Callback for the start of a part.
self.callback("part_begin")
# Move to the next character and state.
state = MultipartState.HEADER_FIELD_START
else:
# Check to ensure our boundary matches
if c != boundary[index + 2]:
msg = "Expected boundary character %r, got %r at index %d" % (boundary[index + 2], c, index + 2)
self.logger.warning(msg)
e = MultipartParseError(msg)
e.offset = i
raise e
# Increment index into boundary and continue.
index += 1
elif state == MultipartState.HEADER_FIELD_START:
# Mark the start of a header field here, reset the index, and
# continue parsing our header field.
index = 0
# Set a mark of our header field.
set_mark("header_field")
# Notify that we're starting a header if the next character is
# not a CR; a CR at the beginning of the header will cause us
# to stop parsing headers in the MultipartState.HEADER_FIELD state,
# below.
if c != CR:
self.callback("header_begin")
# Move to parsing header fields.
state = MultipartState.HEADER_FIELD
i -= 1
elif state == MultipartState.HEADER_FIELD:
# If we've reached a CR at the beginning of a header, it means
# that we've reached the second of 2 newlines, and so there are
# no more headers to parse.
if c == CR and index == 0:
delete_mark("header_field")
state = MultipartState.HEADERS_ALMOST_DONE
i += 1
continue
# Increment our index in the header.
index += 1
# If we've reached a colon, we're done with this header.
if c == COLON:
# A 0-length header is an error.
if index == 1:
msg = "Found 0-length header at %d" % (i,)
self.logger.warning(msg)
e = MultipartParseError(msg)
e.offset = i
raise e
# Call our callback with the header field.
data_callback("header_field", i)
# Move to parsing the header value.
state = MultipartState.HEADER_VALUE_START
elif c not in TOKEN_CHARS_SET:
msg = "Found invalid character %r in header at %d" % (c, i)
self.logger.warning(msg)
e = MultipartParseError(msg)
e.offset = i
raise e
elif state == MultipartState.HEADER_VALUE_START:
# Skip leading spaces.
if c == SPACE:
i += 1
continue
# Mark the start of the header value.
set_mark("header_value")
# Move to the header-value state, reprocessing this character.
state = MultipartState.HEADER_VALUE
i -= 1
elif state == MultipartState.HEADER_VALUE:
# If we've got a CR, we're nearly done with our headers. Otherwise,
# we do nothing and just move past this character.
if c == CR:
data_callback("header_value", i)
self.callback("header_end")
state = MultipartState.HEADER_VALUE_ALMOST_DONE
elif state == MultipartState.HEADER_VALUE_ALMOST_DONE:
# The last character should be a LF. If not, it's an error.
if c != LF:
msg = "Did not find LF character at end of header " "(found %r)" % (c,)
self.logger.warning(msg)
e = MultipartParseError(msg)
e.offset = i
raise e
# Move back to the start of another header. Note that if that
# state detects ANOTHER newline, it'll trigger the end of our
# headers.
state = MultipartState.HEADER_FIELD_START
elif state == MultipartState.HEADERS_ALMOST_DONE:
# We're almost done with our headers. This is reached when we parse
# a CR at the beginning of a header, so our next character
# should be a LF, or it's an error.
if c != LF:
msg = f"Did not find LF at end of headers (found {c!r})"
self.logger.warning(msg)
e = MultipartParseError(msg)
e.offset = i
raise e
self.callback("headers_finished")
state = MultipartState.PART_DATA_START
elif state == MultipartState.PART_DATA_START:
# Mark the start of our part data.
set_mark("part_data")
# Start processing part data, including this character.
state = MultipartState.PART_DATA
i -= 1
elif state == MultipartState.PART_DATA:
# We're processing our part data right now. During this, we
# need to efficiently search for our boundary, since any data
# on any number of lines can be a part of the current data.
# Save the current value of our index. We use this in case we
# find part of a boundary, but it doesn't match fully.
prev_index = index
# Set up variables.
boundary_length = len(boundary)
data_length = length
# If our index is 0, we're starting a new part, so start our
# search.
if index == 0:
# The most common case is likely to be that the whole
# boundary is present in the buffer.
# Calling `find` is much faster than iterating here.
i0 = data.find(boundary, i, data_length)
if i0 >= 0:
# We matched the whole boundary string.
index = boundary_length - 1
i = i0 + boundary_length - 1
else:
# No match found for whole string.
# There may be a partial boundary at the end of the
# data, which the find will not match.
# Since the length to be searched is limited to
# the boundary length, just perform a naive search.
i = max(i, data_length - boundary_length)
# Search forward until we either hit the end of our buffer,
# or reach a potential start of the boundary.
while i < data_length - 1 and data[i] != boundary[0]:
i += 1
c = data[i]
# Now, we have a couple of cases here. If our index is before
# the end of the boundary...
if index < boundary_length:
# If the character matches...
if boundary[index] == c:
# The current character matches, so continue!
index += 1
else:
index = 0
# Our index is equal to the length of our boundary!
elif index == boundary_length:
# First we increment it.
index += 1
# Now, if we've reached a newline, we need to set this as
# the potential end of our boundary.
if c == CR:
flags |= FLAG_PART_BOUNDARY
# Otherwise, if this is a hyphen, we might be at the last
# of all boundaries.
elif c == HYPHEN:
flags |= FLAG_LAST_BOUNDARY
# Otherwise, we reset our index, since this isn't either a
# newline or a hyphen.
else:
index = 0
# Our index is right after the part boundary, which should be
# a LF.
elif index == boundary_length + 1:
# If we're at a part boundary (i.e. we've seen a CR
# character already)...
if flags & FLAG_PART_BOUNDARY:
# We need a LF character next.
if c == LF:
# Unset the part boundary flag.
flags &= ~FLAG_PART_BOUNDARY
# We have identified a boundary, callback for any data before it.
data_callback("part_data", i - index)
# Callback indicating that we've reached the end of
# a part, and are starting a new one.
self.callback("part_end")
self.callback("part_begin")
# Move to parsing new headers.
index = 0
state = MultipartState.HEADER_FIELD_START
i += 1
continue
# We didn't find an LF character, so no match. Reset
# our index and clear our flag.
index = 0
flags &= ~FLAG_PART_BOUNDARY
# Otherwise, if we're at the last boundary (i.e. we've
# seen a hyphen already)...
elif flags & FLAG_LAST_BOUNDARY:
# We need a second hyphen here.
if c == HYPHEN:
# We have identified a boundary, callback for any data before it.
data_callback("part_data", i - index)
# Callback to end the current part, and then the
# message.
self.callback("part_end")
self.callback("end")
state = MultipartState.END
else:
# No match, so reset index.
index = 0
# Otherwise, our index is 0. If the previous index is not, it
# means we reset something, and we need to take the data we
# thought was part of our boundary and send it along as actual
# data.
if index == 0 and prev_index > 0:
# Overwrite our previous index.
prev_index = 0
# Re-consider the current character, since this could be
# the start of the boundary itself.
i -= 1
elif state == MultipartState.END:
# Do nothing and just consume a byte in the end state.
if c not in (CR, LF):
self.logger.warning("Consuming a byte '0x%x' in the end state", c) # pragma: no cover
else: # pragma: no cover (error case)
# We got into a strange state somehow! Just stop processing.
msg = "Reached an unknown state %d at %d" % (state, i)
self.logger.warning(msg)
e = MultipartParseError(msg)
e.offset = i
raise e
# Move to the next byte.
i += 1
# We call our callbacks with any remaining data. Note that we pass
# the 'remaining' flag, which sets the mark back to 0 instead of
# deleting it, if it's found. This is because, if the mark is found
# at this point, we assume that there's data for one of these things
# that has been parsed, but not yet emitted. And, as such, it implies
# that we haven't yet reached the end of this 'thing'. So, by setting
# the mark to 0, we cause any data callbacks that take place in future
# calls to this function to start from the beginning of that buffer.
data_callback("header_field", length, True)
data_callback("header_value", length, True)
data_callback("part_data", length - index, True)
# Save values to locals.
self.state = state
self.index = index
self.flags = flags
# Return our data length to indicate no errors, and that we processed
# all of it.
return length
def finalize(self) -> None:
"""Finalize this parser, which signals to that we are finished parsing.
Note: It does not currently, but in the future, it will verify that we
are in the final state of the parser (i.e. the end of the multipart
message is well-formed), and, if not, throw an error.
"""
# TODO: verify that we're in the state MultipartState.END, otherwise throw an
# error or otherwise state that we're not finished parsing.
pass
def __repr__(self) -> str:
return f"{self.__class__.__name__}(boundary={self.boundary!r})"
class FormParser:
"""This class is the all-in-one form parser. Given all the information
necessary to parse a form, it will instantiate the correct parser, create
the proper :class:`Field` and :class:`File` classes to store the data that
is parsed, and call the two given callbacks with each field and file as
they become available.
Args:
content_type: The Content-Type of the incoming request. This is used to select the appropriate parser.
on_field: The callback to call when a field has been parsed and is ready for usage. See above for parameters.
on_file: The callback to call when a file has been parsed and is ready for usage. See above for parameters.
on_end: An optional callback to call when all fields and files in a request have been parsed. Can be None.
boundary: If the request is a multipart/form-data request, this should be the boundary of the request, as given
in the Content-Type header, as a bytestring.
file_name: If the request is of type application/octet-stream, then the body of the request will not contain any
information about the uploaded file. In such cases, you can provide the file name of the uploaded file
manually.
FileClass: The class to use for uploaded files. Defaults to :class:`File`, but you can provide your own class
if you wish to customize behaviour. The class will be instantiated as FileClass(file_name, field_name), and
it must provide the following functions::
- file_instance.write(data)
- file_instance.finalize()
- file_instance.close()
FieldClass: The class to use for uploaded fields. Defaults to :class:`Field`, but you can provide your own
class if you wish to customize behaviour. The class will be instantiated as FieldClass(field_name), and it
must provide the following functions::
- field_instance.write(data)
- field_instance.finalize()
- field_instance.close()
- field_instance.set_none()
config: Configuration to use for this FormParser. The default values are taken from the DEFAULT_CONFIG value,
and then any keys present in this dictionary will overwrite the default values.
"""
#: This is the default configuration for our form parser.
#: Note: all file sizes should be in bytes.
DEFAULT_CONFIG: FormParserConfig = {
"MAX_BODY_SIZE": float("inf"),
"MAX_MEMORY_FILE_SIZE": 1 * 1024 * 1024,
"UPLOAD_DIR": None,
"UPLOAD_KEEP_FILENAME": False,
"UPLOAD_KEEP_EXTENSIONS": False,
# Error on invalid Content-Transfer-Encoding?
"UPLOAD_ERROR_ON_BAD_CTE": False,
}
def __init__(
self,
content_type: str,
on_field: OnFieldCallback | None,
on_file: OnFileCallback | None,
on_end: Callable[[], None] | None = None,
boundary: bytes | str | None = None,
file_name: bytes | None = None,
FileClass: type[FileProtocol] = File,
FieldClass: type[FieldProtocol] = Field,
config: dict[Any, Any] = {},
) -> None:
self.logger = logging.getLogger(__name__)
# Save variables.
self.content_type = content_type
self.boundary = boundary
self.bytes_received = 0
self.parser = None
# Save callbacks.
self.on_field = on_field
self.on_file = on_file
self.on_end = on_end
# Save classes.
self.FileClass = FileClass
self.FieldClass = FieldClass
# Set configuration options.
self.config: FormParserConfig = self.DEFAULT_CONFIG.copy()
self.config.update(config) # type: ignore[typeddict-item]
parser: OctetStreamParser | MultipartParser | QuerystringParser | None = None
# Depending on the Content-Type, we instantiate the correct parser.
if content_type == "application/octet-stream":
file: FileProtocol = None # type: ignore
def on_start() -> None:
nonlocal file
file = FileClass(file_name, None, config=cast("FileConfig", self.config))
def on_data(data: bytes, start: int, end: int) -> None:
nonlocal file
file.write(data[start:end])
def _on_end() -> None:
nonlocal file
# Finalize the file itself.
file.finalize()
# Call our callback.
if on_file:
on_file(file)
# Call the on-end callback.
if self.on_end is not None:
self.on_end()
# Instantiate an octet-stream parser
parser = OctetStreamParser(
callbacks={"on_start": on_start, "on_data": on_data, "on_end": _on_end},
max_size=self.config["MAX_BODY_SIZE"],
)
elif content_type == "application/x-www-form-urlencoded" or content_type == "application/x-url-encoded":
name_buffer: list[bytes] = []
f: FieldProtocol | None = None
def on_field_start() -> None:
pass
def on_field_name(data: bytes, start: int, end: int) -> None:
name_buffer.append(data[start:end])
def on_field_data(data: bytes, start: int, end: int) -> None:
nonlocal f
if f is None:
f = FieldClass(b"".join(name_buffer))
del name_buffer[:]
f.write(data[start:end])
def on_field_end() -> None:
nonlocal f
# Finalize and call callback.
if f is None:
# If we get here, it's because there was no field data.
# We create a field, set it to None, and then continue.
f = FieldClass(b"".join(name_buffer))
del name_buffer[:]
f.set_none()
f.finalize()
if on_field:
on_field(f)
f = None
def _on_end() -> None:
if self.on_end is not None:
self.on_end()
# Instantiate parser.
parser = QuerystringParser(
callbacks={
"on_field_start": on_field_start,
"on_field_name": on_field_name,
"on_field_data": on_field_data,
"on_field_end": on_field_end,
"on_end": _on_end,
},
max_size=self.config["MAX_BODY_SIZE"],
)
elif content_type == "multipart/form-data":
if boundary is None:
self.logger.error("No boundary given")
raise FormParserError("No boundary given")
header_name: list[bytes] = []
header_value: list[bytes] = []
headers: dict[bytes, bytes] = {}
f_multi: FileProtocol | FieldProtocol | None = None
writer = None
is_file = False
def on_part_begin() -> None:
# Reset headers in case this isn't the first part.
nonlocal headers
headers = {}
def on_part_data(data: bytes, start: int, end: int) -> None:
nonlocal writer
assert writer is not None
writer.write(data[start:end])
# TODO: check for error here.
def on_part_end() -> None:
nonlocal f_multi, is_file
assert f_multi is not None
f_multi.finalize()
if is_file:
if on_file:
on_file(f_multi)
else:
if on_field:
on_field(cast("FieldProtocol", f_multi))
def on_header_field(data: bytes, start: int, end: int) -> None:
header_name.append(data[start:end])
def on_header_value(data: bytes, start: int, end: int) -> None:
header_value.append(data[start:end])
def on_header_end() -> None:
headers[b"".join(header_name)] = b"".join(header_value)
del header_name[:]
del header_value[:]
def on_headers_finished() -> None:
nonlocal is_file, f_multi, writer
# Reset the 'is file' flag.
is_file = False
# Parse the content-disposition header.
# TODO: handle mixed case
content_disp = headers.get(b"Content-Disposition")
disp, options = parse_options_header(content_disp)
# Get the field and filename.
field_name = options.get(b"name")
file_name = options.get(b"filename")
# TODO: check for errors
# Create the proper class.
if file_name is None:
f_multi = FieldClass(field_name)
else:
f_multi = FileClass(file_name, field_name, config=cast("FileConfig", self.config))
is_file = True
# Parse the given Content-Transfer-Encoding to determine what
# we need to do with the incoming data.
# TODO: check that we properly handle 8bit / 7bit encoding.
transfer_encoding = headers.get(b"Content-Transfer-Encoding", b"7bit")
if transfer_encoding in (b"binary", b"8bit", b"7bit"):
writer = f_multi
elif transfer_encoding == b"base64":
writer = Base64Decoder(f_multi)
elif transfer_encoding == b"quoted-printable":
writer = QuotedPrintableDecoder(f_multi)
else:
self.logger.warning("Unknown Content-Transfer-Encoding: %r", transfer_encoding)
if self.config["UPLOAD_ERROR_ON_BAD_CTE"]:
raise FormParserError("Unknown Content-Transfer-Encoding {!r}".format(transfer_encoding))
else:
# If we aren't erroring, then we just treat this as an
# unencoded Content-Transfer-Encoding.
writer = f_multi
def _on_end() -> None:
nonlocal writer
assert writer is not None
writer.finalize()
if self.on_end is not None:
self.on_end()
# Instantiate a multipart parser.
parser = MultipartParser(
boundary,
callbacks={
"on_part_begin": on_part_begin,
"on_part_data": on_part_data,
"on_part_end": on_part_end,
"on_header_field": on_header_field,
"on_header_value": on_header_value,
"on_header_end": on_header_end,
"on_headers_finished": on_headers_finished,
"on_end": _on_end,
},
max_size=self.config["MAX_BODY_SIZE"],
)
else:
self.logger.warning("Unknown Content-Type: %r", content_type)
raise FormParserError("Unknown Content-Type: {}".format(content_type))
self.parser = parser
def write(self, data: bytes) -> int:
"""Write some data. The parser will forward this to the appropriate
underlying parser.
Args:
data: The data to write.
Returns:
The number of bytes processed.
"""
self.bytes_received += len(data)
# TODO: check the parser's return value for errors?
assert self.parser is not None
return self.parser.write(data)
def finalize(self) -> None:
"""Finalize the parser."""
if self.parser is not None and hasattr(self.parser, "finalize"):
self.parser.finalize()
def close(self) -> None:
"""Close the parser."""
if self.parser is not None and hasattr(self.parser, "close"):
self.parser.close()
def __repr__(self) -> str:
return "{}(content_type={!r}, parser={!r})".format(self.__class__.__name__, self.content_type, self.parser)
def create_form_parser(
headers: dict[str, bytes],
on_field: OnFieldCallback | None,
on_file: OnFileCallback | None,
trust_x_headers: bool = False,
config: dict[Any, Any] = {},
) -> FormParser:
"""This function is a helper function to aid in creating a FormParser
instances. Given a dictionary-like headers object, it will determine
the correct information needed, instantiate a FormParser with the
appropriate values and given callbacks, and then return the corresponding
parser.
Args:
headers: A dictionary-like object of HTTP headers. The only required header is Content-Type.
on_field: Callback to call with each parsed field.
on_file: Callback to call with each parsed file.
trust_x_headers: Whether or not to trust information received from certain X-Headers - for example, the file
name from X-File-Name.
config: Configuration variables to pass to the FormParser.
"""
content_type: str | bytes | None = headers.get("Content-Type")
if content_type is None:
logging.getLogger(__name__).warning("No Content-Type header given")
raise ValueError("No Content-Type header given!")
# Boundaries are optional (the FormParser will raise if one is needed
# but not given).
content_type, params = parse_options_header(content_type)
boundary = params.get(b"boundary")
# We need content_type to be a string, not a bytes object.
content_type = content_type.decode("latin-1")
# File names are optional, and only read from the X-File-Name header when the
# caller explicitly trusts X-Headers.
file_name = headers.get("X-File-Name") if trust_x_headers else None
# Instantiate a form parser.
form_parser = FormParser(content_type, on_field, on_file, boundary=boundary, file_name=file_name, config=config)
# Return our parser.
return form_parser
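# A minimal usage sketch (illustrative helper, not called anywhere):
# create_form_parser picks the right parser from the Content-Type header; here a
# multipart request whose boundary is pulled out of the header parameters.
def _example_create_form_parser() -> None:  # pragma: no cover - example only
    headers = {"Content-Type": b"multipart/form-data; boundary=boundary"}
    parser = create_form_parser(headers, on_field=None, on_file=None)
    assert isinstance(parser.parser, MultipartParser)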
def parse_form(
headers: dict[str, bytes],
input_stream: SupportsRead,
on_field: OnFieldCallback | None,
on_file: OnFileCallback | None,
chunk_size: int = 1048576,
) -> None:
"""This function is useful if you just want to parse a request body,
without too much work. Pass it a dictionary-like object of the request's
headers, and a file-like object for the input stream, along with two
callbacks that will get called whenever a field or file is parsed.
Args:
headers: A dictionary-like object of HTTP headers. The only required header is Content-Type.
input_stream: A file-like object that represents the request body. The read() method must return bytestrings.
on_field: Callback to call with each parsed field.
on_file: Callback to call with each parsed file.
chunk_size: The maximum size to read from the input stream and write to the parser at one time.
Defaults to 1 MiB.
"""
# Create our form parser.
parser = create_form_parser(headers, on_field, on_file)
# Read chunks of up to chunk_size bytes and write them to the parser, but never
# read more than the given Content-Length, if any.
content_length: int | float | bytes | None = headers.get("Content-Length")
if content_length is not None:
content_length = int(content_length)
else:
content_length = float("inf")
bytes_read = 0
while True:
# Read only up to the Content-Length given.
max_readable = int(min(content_length - bytes_read, chunk_size))
buff = input_stream.read(max_readable)
# Write to the parser and update our length.
parser.write(buff)
bytes_read += len(buff)
# If we get a buffer that's smaller than the size requested, or if we
# have read up to our content length, we're done.
if len(buff) != max_readable or bytes_read == content_length:
break
# Tell our parser that we're done writing data.
parser.finalize()
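# A minimal usage sketch (illustrative helper, not called anywhere): parsing a
# complete urlencoded request body from an in-memory stream with the parse_form
# helper. The callbacks receive the concrete Field objects built by FormParser.
def _example_parse_form() -> None:  # pragma: no cover - example only
    body = b"name=alice&age=30"
    headers = {
        "Content-Type": b"application/x-www-form-urlencoded",
        "Content-Length": str(len(body)).encode(),
    }
    seen: list[tuple[bytes | None, bytes | None]] = []
    parse_form(headers, BytesIO(body), lambda f: seen.append((f.field_name, f.value)), None)
    assert seen == [(b"name", b"alice"), (b"age", b"30")]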