docstring | function | __index_level_0__
---|---|---|
Copies a range of values to a new location in the data set.
Args:
reader: A FileRecordStream object with input data.
writer: A FileRecordStream object to write output data to.
start: The first row in the range to copy.
stop: The last row in the range to copy.
insertLocation: The location to insert the copied range. If not specified,
the range is inserted immediately following itself.
tsCol: The timestamp column to update. If not specified, the reader's
timestamp field index is used. | def copy(reader, writer, start, stop, insertLocation=None, tsCol=None):
assert stop >= start
startRows = []
copyRows = []
ts = None
inc = None
if tsCol is None:
tsCol = reader.getTimestampFieldIdx()
for i, row in enumerate(reader):
# Get the first timestamp and the increment.
if ts is None:
ts = row[tsCol]
elif inc is None:
inc = row[tsCol] - ts
# Keep a list of all rows and a list of rows to copy.
if i >= start and i <= stop:
copyRows.append(row)
startRows.append(row)
# Insert the copied rows.
if insertLocation is None:
insertLocation = stop + 1
startRows[insertLocation:insertLocation] = copyRows
# Update the timestamps.
for row in startRows:
row[tsCol] = ts
writer.appendRecord(row)
ts += inc | 109,800 |
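The heart of ``copy`` is the slice assignment that splices the copied block back into the full row list before the timestamps are rewritten. A minimal sketch of just that splice, using plain integer lists instead of FileRecordStream rows:

```python
rows = list(range(10))            # stand-in for the rows read from the reader
start, stop = 2, 4                # inclusive range to copy
copy_rows = rows[start:stop + 1]

insert_location = stop + 1        # default: insert immediately after the copied range
rows[insert_location:insert_location] = copy_rows

print(rows)                       # [0, 1, 2, 3, 4, 2, 3, 4, 5, 6, 7, 8, 9]
```

In the real function the timestamps of every written row are then regenerated from the first timestamp and the fixed increment, so the duplicated block does not produce duplicate timestamps.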
Samples n rows.
Args:
reader: A FileRecordStream object with input data.
writer: A FileRecordStream object to write output data to.
n: The number of elements to sample.
start: The first row in the range to sample from.
stop: The last row in the range to sample from.
tsCol: If specified, the timestamp column to update.
writeSampleOnly: If False, the rows before start are written before the
sample and the rows after stop are written after the sample. | def sample(reader, writer, n, start=None, stop=None, tsCol=None,
writeSampleOnly=True):
rows = list(reader)
if tsCol is not None:
ts = rows[0][tsCol]
inc = rows[1][tsCol] - ts
if start is None:
start = 0
if stop is None:
stop = len(rows) - 1
initialN = stop - start + 1
# Select random rows in the sample range to delete until the desired number
# of rows are left.
numDeletes = initialN - n
for i in xrange(numDeletes):
delIndex = random.randint(start, stop - i)
del rows[delIndex]
# Remove outside rows if specified.
if writeSampleOnly:
rows = rows[start:start + n]
# Rewrite columns if tsCol is given.
if tsCol is not None:
ts = rows[0][tsCol]
# Write resulting rows.
for row in rows:
if tsCol is not None:
row[tsCol] = ts
ts += inc
writer.appendRecord(row) | 109,801 |
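The sampling strategy is deletion-based: random rows are removed from the requested window until exactly ``n`` remain, which keeps the survivors in their original order. A minimal sketch with plain lists (no FileRecordStream, no timestamp rewriting):

```python
import random

rows = list(range(10))                    # stand-in rows
start, stop, n = 0, 9, 4
num_deletes = (stop - start + 1) - n

for i in range(num_deletes):
    # The deletable window shrinks by one after each deletion.
    del rows[random.randint(start, stop - i)]

print(rows)                               # e.g. [0, 3, 5, 8]
```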
Generates very simple patterns. Each pattern has numOnes consecutive
bits on. The amount of overlap between consecutive patterns is
configurable, via the patternOverlap parameter.
Parameters:
-----------------------------------------------------------------------
numOnes: Number of bits ON in each pattern
numPatterns: Number of unique patterns to generate
patternOverlap: Number of bits of overlap between each successive pattern
retval: list of patterns, one numpy array of length numCols per pattern | def getSimplePatterns(numOnes, numPatterns, patternOverlap=0):
assert (patternOverlap < numOnes)
# How many new bits are introduced in each successive pattern?
numNewBitsInEachPattern = numOnes - patternOverlap
numCols = numNewBitsInEachPattern * numPatterns + patternOverlap
p = []
for i in xrange(numPatterns):
x = numpy.zeros(numCols, dtype='float32')
startBit = i*numNewBitsInEachPattern
nextStartBit = startBit + numOnes
x[startBit:nextStartBit] = 1
p.append(x)
return p | 109,891 |
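A worked example of the layout: with ``numOnes=3``, ``numPatterns=3`` and ``patternOverlap=1``, each successive pattern introduces 3 - 1 = 2 new bits, so 2 * 3 + 1 = 7 columns are allocated and consecutive patterns share exactly one bit:

```python
import numpy

# Same loop as getSimplePatterns(3, 3, patternOverlap=1), with the numbers inlined.
for i in range(3):
    x = numpy.zeros(7, dtype='float32')
    x[i * 2:i * 2 + 3] = 1
    print(x)
# [1. 1. 1. 0. 0. 0. 0.]
# [0. 0. 1. 1. 1. 0. 0.]   <- shares bit 2 with the previous pattern
# [0. 0. 0. 0. 1. 1. 1.]   <- shares bit 4 with the previous pattern
```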
Create one or more TM instances, placing each into a dict keyed by
name.
Parameters:
------------------------------------------------------------------
retval: tms - dict of TM instances | def createTMs(includeCPP = True,
includePy = True,
numCols = 100,
cellsPerCol = 4,
activationThreshold = 3,
minThreshold = 3,
newSynapseCount = 3,
initialPerm = 0.6,
permanenceInc = 0.1,
permanenceDec = 0.0,
globalDecay = 0.0,
pamLength = 0,
checkSynapseConsistency = True,
maxInfBacktrack = 0,
maxLrnBacktrack = 0,
**kwargs
):
# Keep these fixed:
connectedPerm = 0.5
tms = dict()
if includeCPP:
if VERBOSITY >= 2:
print "Creating BacktrackingTMCPP instance"
cpp_tm = BacktrackingTMCPP(numberOfCols = numCols, cellsPerColumn = cellsPerCol,
initialPerm = initialPerm, connectedPerm = connectedPerm,
minThreshold = minThreshold, newSynapseCount = newSynapseCount,
permanenceInc = permanenceInc, permanenceDec = permanenceDec,
activationThreshold = activationThreshold,
globalDecay = globalDecay, burnIn = 1,
seed=SEED, verbosity=VERBOSITY,
checkSynapseConsistency = checkSynapseConsistency,
collectStats = True,
pamLength = pamLength,
maxInfBacktrack = maxInfBacktrack,
maxLrnBacktrack = maxLrnBacktrack,
)
# Ensure we are copying over learning states for TMDiff
cpp_tm.retrieveLearningStates = True
tms['CPP'] = cpp_tm
if includePy:
if VERBOSITY >= 2:
print "Creating PY TM instance"
py_tm = BacktrackingTM(numberOfCols = numCols,
cellsPerColumn = cellsPerCol,
initialPerm = initialPerm,
connectedPerm = connectedPerm,
minThreshold = minThreshold,
newSynapseCount = newSynapseCount,
permanenceInc = permanenceInc,
permanenceDec = permanenceDec,
activationThreshold = activationThreshold,
globalDecay = globalDecay, burnIn = 1,
seed=SEED, verbosity=VERBOSITY,
collectStats = True,
pamLength = pamLength,
maxInfBacktrack = maxInfBacktrack,
maxLrnBacktrack = maxLrnBacktrack,
)
tms['PY '] = py_tm
return tms | 109,894 |
Check for diffs among the TM instances in the passed-in tms dict and
raise an AssertionError if any are detected.
Parameters:
---------------------------------------------------------------------
tms: dict of TM instances | def assertNoTMDiffs(tms):
if len(tms) == 1:
return
if len(tms) > 2:
raise "Not implemented for more than 2 TMs"
same = fdrutils.tmDiff2(tms.values(), verbosity=VERBOSITY)
assert(same)
return | 109,895 |
Configure a plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
options (dict): A key-value mapping of options
Returns:
``True`` if successful | def configure_plugin(self, name, options):
url = self._url('/plugins/{0}/set', name)
data = options
if isinstance(data, dict):
data = ['{0}={1}'.format(k, v) for k, v in six.iteritems(data)]
res = self._post_json(url, data=data)
self._raise_for_status(res)
return True | 111,065 |
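The plugin ``set`` endpoint expects a list of ``key=value`` strings rather than a JSON object, which is why a dict is flattened before posting. A quick illustration (the option names are made up):

```python
options = {'DEBUG': '1', 'LOG_LEVEL': 'warn'}        # hypothetical plugin settings
payload = ['{0}={1}'.format(k, v) for k, v in options.items()]
# payload == ['DEBUG=1', 'LOG_LEVEL=warn']           # ordering may differ on older Pythons
```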
Disable an installed plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
Returns:
``True`` if successful | def disable_plugin(self, name):
url = self._url('/plugins/{0}/disable', name)
res = self._post(url)
self._raise_for_status(res)
return True | 111,067 |
Enable an installed plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
timeout (int): Operation timeout (in seconds). Default: 0
Returns:
``True`` if successful | def enable_plugin(self, name, timeout=0):
url = self._url('/plugins/{0}/enable', name)
params = {'timeout': timeout}
res = self._post(url, params=params)
self._raise_for_status(res)
return True | 111,068 |
Retrieve plugin metadata.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
Returns:
A dict containing plugin info | def inspect_plugin(self, name):
url = self._url('/plugins/{0}/json', name)
return self._result(self._get(url), True) | 111,069 |
Retrieve list of privileges to be granted to a plugin.
Args:
name (string): Name of the remote plugin to examine. The
``:latest`` tag is optional, and is the default if omitted.
Returns:
A list of dictionaries representing the plugin's
permissions | def plugin_privileges(self, name):
params = {
'remote': name,
}
headers = {}
registry, repo_name = auth.resolve_repository_name(name)
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
url = self._url('/plugins/privileges')
return self._result(
self._get(url, params=params, headers=headers), True
) | 111,071 |
Push a plugin to the registry.
Args:
name (string): Name of the plugin to upload. The ``:latest``
tag is optional, and is the default if omitted.
Returns:
A generator streaming the decoded API logs | def push_plugin(self, name):
url = self._url('/plugins/{0}/pull', name)
headers = {}
registry, repo_name = auth.resolve_repository_name(name)
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
res = self._post(url, headers=headers)
self._raise_for_status(res)
return self._stream_helper(res, decode=True) | 111,072 |
Remove an installed plugin.
Args:
name (string): Name of the plugin to remove. The ``:latest``
tag is optional, and is the default if omitted.
force (bool): Disable the plugin before removing. This may
result in issues if the plugin is in use by a container.
Returns:
``True`` if successful | def remove_plugin(self, name, force=False):
url = self._url('/plugins/{0}', name)
res = self._delete(url, params={'force': force})
self._raise_for_status(res)
return True | 111,073 |
Iterate through frames read from the socket and return the result.
Args:
demux (bool):
If False, stdout and stderr are multiplexed, and the result is the
concatenation of all the frames. If True, the streams are
demultiplexed, and the result is a 2-tuple where each item is the
concatenation of frames belonging to the same stream. | def consume_socket_output(frames, demux=False):
if demux is False:
# If the streams are multiplexed, the generator returns strings, that
# we just need to concatenate.
return six.binary_type().join(frames)
# If the streams are demultiplexed, the generator yields tuples
# (stdout, stderr)
out = [None, None]
for frame in frames:
# It is guaranteed that for each frame, one and only one stream
# is not None.
assert frame != (None, None)
if frame[0] is not None:
if out[0] is None:
out[0] = frame[0]
else:
out[0] += frame[0]
else:
if out[1] is None:
out[1] = frame[1]
else:
out[1] += frame[1]
return tuple(out) | 111,085 |
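A small sketch of the two modes, assuming frames shaped like those the attach/exec generators yield (raw bytes when multiplexed, ``(stdout, stderr)`` tuples when demultiplexed):

```python
mux_frames = [b'hello ', b'world\n']
demux_frames = [(b'out1', None), (None, b'err1'), (b'out2', None)]

consume_socket_output(mux_frames, demux=False)   # -> b'hello world\n'
consume_socket_output(demux_frames, demux=True)  # -> (b'out1out2', b'err1')
```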
Retrieve low-level information about a swarm node
Args:
node_id (string): ID of the node to be inspected.
Returns:
A dictionary containing data about this node.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def inspect_node(self, node_id):
url = self._url('/nodes/{0}', node_id)
return self._result(self._get(url), True) | 111,094 |
Leave a swarm.
Args:
force (bool): Leave the swarm even if this node is a manager.
Default: ``False``
Returns:
``True`` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def leave_swarm(self, force=False):
url = self._url('/swarm/leave')
response = self._post(url, params={'force': force})
# Ignore "this node is not part of a swarm" error
if force and response.status_code == http_client.NOT_ACCEPTABLE:
return True
# FIXME: Temporary workaround for 1.13.0-rc bug
# https://github.com/docker/docker/issues/29192
if force and response.status_code == http_client.SERVICE_UNAVAILABLE:
return True
self._raise_for_status(response)
return True | 111,096 |
Remove a node from the swarm.
Args:
node_id (string): ID of the node to be removed.
force (bool): Force remove an active node. Default: `False`
Raises:
:py:class:`docker.errors.NotFound`
If the node referenced doesn't exist in the swarm.
:py:class:`docker.errors.APIError`
If the server returns an error.
Returns:
`True` if the request was successful. | def remove_node(self, node_id, force=False):
url = self._url('/nodes/{0}', node_id)
params = {
'force': force
}
res = self._delete(url, params=params)
self._raise_for_status(res)
return True | 111,097 |
Get a secret.
Args:
secret_id (str): Secret ID.
Returns:
(:py:class:`Secret`): The secret.
Raises:
:py:class:`docker.errors.NotFound`
If the secret does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error. | def get(self, secret_id):
return self.prepare_model(self.client.api.inspect_secret(secret_id)) | 111,114 |
List secrets. Similar to the ``docker secret ls`` command.
Args:
filters (dict): Server-side list filtering options.
Returns:
(list of :py:class:`Secret`): The secrets.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def list(self, **kwargs):
resp = self.client.api.secrets(**kwargs)
return [self.prepare_model(obj) for obj in resp] | 111,115 |
Remove a network. Similar to the ``docker network rm`` command.
Args:
net_id (str): The network's id | def remove_network(self, net_id):
url = self._url("/networks/{0}", net_id)
res = self._delete(url)
self._raise_for_status(res) | 111,118 |
Get detailed information about a network.
Args:
net_id (str): ID of network
verbose (bool): Show the service details across the cluster in
swarm mode.
scope (str): Filter the network by scope (``swarm``, ``global``
or ``local``). | def inspect_network(self, net_id, verbose=None, scope=None):
params = {}
if verbose is not None:
if version_lt(self._version, '1.28'):
raise InvalidVersion('verbose was introduced in API 1.28')
params['verbose'] = verbose
if scope is not None:
if version_lt(self._version, '1.31'):
raise InvalidVersion('scope was introduced in API 1.31')
params['scope'] = scope
url = self._url("/networks/{0}", net_id)
res = self._get(url, params=params)
return self._result(res, json=True) | 111,119 |
Disconnect a container from a network.
Args:
container (str): container ID or name to be disconnected from the
network
net_id (str): network ID
force (bool): Force the container to disconnect from a network.
Default: ``False`` | def disconnect_container_from_network(self, container, net_id,
force=False):
data = {"Container": container}
if force:
if version_lt(self._version, '1.22'):
raise InvalidVersion(
'Forced disconnect was introduced in API 1.22'
)
data['Force'] = force
url = self._url("/networks/{0}/disconnect", net_id)
res = self._post_json(url, data=data)
self._raise_for_status(res) | 111,121 |
Retrieve volume info by name.
Args:
name (str): volume name
Returns:
(dict): Volume information dictionary
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> cli.inspect_volume('foobar')
{u'Driver': u'local',
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar'} | def inspect_volume(self, name):
url = self._url('/volumes/{0}', name)
return self._result(self._get(url), True) | 111,128 |
Delete unused volumes
Args:
filters (dict): Filters to process on the prune list.
Returns:
(dict): A dict containing a list of deleted volume names and
the amount of disk space reclaimed in bytes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def prune_volumes(self, filters=None):
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
url = self._url('/volumes/prune')
return self._result(self._post(url, params=params), True) | 111,129 |
Remove a volume. Similar to the ``docker volume rm`` command.
Args:
name (str): The volume's name
force (bool): Force removal of volumes that were already removed
out of band by the volume driver plugin.
Raises:
:py:class:`docker.errors.APIError`
If volume failed to remove. | def remove_volume(self, name, force=False):
params = {}
if force:
if utils.version_lt(self._version, '1.25'):
raise errors.InvalidVersion(
'force removal was introduced in API 1.25'
)
params = {'force': force}
url = self._url('/volumes/{0}', name, params=params)
resp = self._delete(url)
self._raise_for_status(resp) | 111,130 |
Create a config
Args:
name (string): Name of the config
data (bytes): Config data to be stored
labels (dict): A mapping of labels to assign to the config
Returns (dict): ID of the newly created config | def create_config(self, name, data, labels=None):
if not isinstance(data, bytes):
data = data.encode('utf-8')
data = base64.b64encode(data)
if six.PY3:
data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
'Labels': labels
}
url = self._url('/configs/create')
return self._result(
self._post_json(url, data=body), True
) | 111,131 |
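Config (and secret) payloads are base64-encoded before being sent, so the ``Data`` field never carries the raw bytes. A quick illustration of the encoding step:

```python
import base64

data = 'db_host=postgres\n'.encode('utf-8')       # hypothetical config contents
encoded = base64.b64encode(data).decode('ascii')
print(encoded)                                    # ZGJfaG9zdD1wb3N0Z3Jlcwo=
```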
Retrieve config metadata
Args:
id (string): Full ID of the config to inspect
Returns (dict): A dictionary of metadata
Raises:
:py:class:`docker.errors.NotFound`
if no config with that ID exists | def inspect_config(self, id):
url = self._url('/configs/{0}', id)
return self._result(self._get(url), True) | 111,132 |
Remove a config
Args:
id (string): Full ID of the config to remove
Returns (boolean): True if successful
Raises:
:py:class:`docker.errors.NotFound`
if no config with that ID exists | def remove_config(self, id):
url = self._url('/configs/{0}', id)
res = self._delete(url)
self._raise_for_status(res)
return True | 111,133 |
List configs
Args:
filters (dict): A map of filters to process on the configs
list. Available filters: ``names``
Returns (list): A list of configs | def configs(self, filters=None):
url = self._url('/configs')
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
return self._result(self._get(url, params=params), True) | 111,134 |
Create a secret
Args:
name (string): Name of the secret
data (bytes): Secret data to be stored
labels (dict): A mapping of labels to assign to the secret
driver (DriverConfig): A custom driver configuration. If
unspecified, the default ``internal`` driver will be used
Returns (dict): ID of the newly created secret | def create_secret(self, name, data, labels=None, driver=None):
if not isinstance(data, bytes):
data = data.encode('utf-8')
data = base64.b64encode(data)
if six.PY3:
data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
'Labels': labels
}
if driver is not None:
if utils.version_lt(self._version, '1.31'):
raise errors.InvalidVersion(
'Secret driver is only available for API version > 1.31'
)
body['Driver'] = driver
url = self._url('/secrets/create')
return self._result(
self._post_json(url, data=body), True
) | 111,141 |
Retrieve secret metadata
Args:
id (string): Full ID of the secret to inspect
Returns (dict): A dictionary of metadata
Raises:
:py:class:`docker.errors.NotFound`
if no secret with that ID exists | def inspect_secret(self, id):
url = self._url('/secrets/{0}', id)
return self._result(self._get(url), True) | 111,142 |
Remove this node from the swarm.
Args:
force (bool): Force remove an active node. Default: `False`
Returns:
`True` if the request was successful.
Raises:
:py:class:`docker.errors.NotFound`
If the node doesn't exist in the swarm.
:py:class:`docker.errors.APIError`
If the server returns an error. | def remove(self, force=False):
return self.client.api.remove_node(self.id, force=force) | 111,145 |
Get a node.
Args:
node_id (string): ID of the node to be inspected.
Returns:
A :py:class:`Node` object.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def get(self, node_id):
return self.prepare_model(self.client.api.inspect_node(node_id)) | 111,146 |
List swarm nodes.
Args:
filters (dict): Filters to process on the nodes list. Valid
filters: ``id``, ``name``, ``membership`` and ``role``.
Default: ``None``
Returns:
A list of :py:class:`Node` objects.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.nodes.list(filters={'role': 'manager'}) | def list(self, *args, **kwargs):
return [
self.prepare_model(n)
for n in self.client.api.nodes(*args, **kwargs)
] | 111,147 |
Update the plugin's settings.
Args:
options (dict): A key-value mapping of options.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def configure(self, options):
self.client.api.configure_plugin(self.name, options)
self.reload() | 111,150 |
Enable the plugin.
Args:
timeout (int): Timeout in seconds. Default: 0
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def enable(self, timeout=0):
self.client.api.enable_plugin(self.name, timeout)
self.reload() | 111,152 |
Remove the plugin from the server.
Args:
force (bool): Remove even if the plugin is enabled.
Default: False
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def remove(self, force=False):
return self.client.api.remove_plugin(self.name, force=force) | 111,153 |
Upgrade the plugin.
Args:
remote (string): Remote reference to upgrade to. The
``:latest`` tag is optional and is the default if omitted.
Default: this plugin's name.
Returns:
A generator streaming the decoded API logs | def upgrade(self, remote=None):
if self.enabled:
raise errors.DockerError(
'Plugin must be disabled before upgrading.'
)
if remote is None:
remote = self.name
privileges = self.client.api.plugin_privileges(remote)
for d in self.client.api.upgrade_plugin(self.name, remote, privileges):
yield d
self._reload() | 111,154 |
Gets a plugin.
Args:
name (str): The name of the plugin.
Returns:
(:py:class:`Plugin`): The plugin.
Raises:
:py:class:`docker.errors.NotFound` If the plugin does not
exist.
:py:class:`docker.errors.APIError`
If the server returns an error. | def get(self, name):
return self.prepare_model(self.client.api.inspect_plugin(name)) | 111,156 |
Return information about a service.
Args:
service (str): Service name or ID.
insert_defaults (boolean): If true, default values will be merged
into the service inspect output.
Returns:
(dict): A dictionary of the server-side representation of the
service, including all relevant properties.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def inspect_service(self, service, insert_defaults=None):
url = self._url('/services/{0}', service)
params = {}
if insert_defaults is not None:
if utils.version_lt(self._version, '1.29'):
raise errors.InvalidVersion(
'insert_defaults is not supported in API version < 1.29'
)
params['insertDefaults'] = insert_defaults
return self._result(self._get(url, params=params), True) | 111,172 |
Retrieve information about a task.
Args:
task (str): Task ID
Returns:
(dict): Information about the task.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def inspect_task(self, task):
url = self._url('/tasks/{0}', task)
return self._result(self._get(url), True) | 111,173 |
Stop and remove a service.
Args:
service (str): Service name or ID
Returns:
``True`` if successful.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def remove_service(self, service):
url = self._url('/services/{0}', service)
resp = self._delete(url)
self._raise_for_status(resp)
return True | 111,174 |
Remove this volume.
Args:
force (bool): Force removal of volumes that were already removed
out of band by the volume driver plugin.
Raises:
:py:class:`docker.errors.APIError`
If volume failed to remove. | def remove(self, force=False):
return self.client.api.remove_volume(self.id, force=force) | 111,191 |
Get a volume.
Args:
volume_id (str): Volume name.
Returns:
(:py:class:`Volume`): The volume.
Raises:
:py:class:`docker.errors.NotFound`
If the volume does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error. | def get(self, volume_id):
return self.prepare_model(self.client.api.inspect_volume(volume_id)) | 111,193 |
List volumes. Similar to the ``docker volume ls`` command.
Args:
filters (dict): Server-side list filtering options.
Returns:
(list of :py:class:`Volume`): The volumes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def list(self, **kwargs):
resp = self.client.api.volumes(**kwargs)
if not resp.get('Volumes'):
return []
return [self.prepare_model(obj) for obj in resp['Volumes']] | 111,194 |
Get a config.
Args:
config_id (str): Config ID.
Returns:
(:py:class:`Config`): The config.
Raises:
:py:class:`docker.errors.NotFound`
If the config does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error. | def get(self, config_id):
return self.prepare_model(self.client.api.inspect_config(config_id)) | 111,200 |
Show the history of an image.
Args:
image (str): The image to show history for
Returns:
(str): The history of the image
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def history(self, image):
res = self._get(self._url("/images/{0}/history", image))
return self._result(res, True) | 111,220 |
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but
allows importing in-memory bytes data.
Args:
data (bytes collection): Bytes collection containing valid tar data
repository (str): The repository to create
tag (str): The tag to apply | def import_image_from_data(self, data, repository=None, tag=None,
changes=None):
u = self._url('/images/create')
params = _import_image_params(
repository, tag, src='-', changes=changes
)
headers = {'Content-Type': 'application/tar'}
return self._result(
self._post(
u, data=data, params=params, headers=headers, timeout=None
)
) | 111,223 |
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from a tar file on disk.
Args:
filename (str): Full path to a tar file.
repository (str): The repository to create
tag (str): The tag to apply
Raises:
IOError: File does not exist. | def import_image_from_file(self, filename, repository=None, tag=None,
changes=None):
return self.import_image(
src=filename, repository=repository, tag=tag, changes=changes
) | 111,224 |
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from a URL.
Args:
url (str): A URL pointing to a tar file.
repository (str): The repository to create
tag (str): The tag to apply | def import_image_from_url(self, url, repository=None, tag=None,
changes=None):
return self.import_image(
src=url, repository=repository, tag=tag, changes=changes
) | 111,226 |
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from another image, like the ``FROM`` Dockerfile
parameter.
Args:
image (str): Image name to import from
repository (str): The repository to create
tag (str): The tag to apply | def import_image_from_image(self, image, repository=None, tag=None,
changes=None):
return self.import_image(
image=image, repository=repository, tag=tag, changes=changes
) | 111,227 |
Get detailed information about an image. Similar to the ``docker
inspect`` command, but only for images.
Args:
image (str): The image to inspect
Returns:
(dict): Similar to the output of ``docker inspect``, but as a
single dict
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def inspect_image(self, image):
return self._result(
self._get(self._url("/images/{0}/json", image)), True
) | 111,228 |
Remove an image. Similar to the ``docker rmi`` command.
Args:
image (str): The image to remove
force (bool): Force removal of the image
noprune (bool): Do not delete untagged parents | def remove_image(self, image, force=False, noprune=False):
params = {'force': force, 'noprune': noprune}
res = self._delete(self._url("/images/{0}", image), params=params)
return self._result(res, True) | 111,233 |
Search for images on Docker Hub. Similar to the ``docker search``
command.
Args:
term (str): A term to search for.
Returns:
(list of dicts): The response of the search.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def search(self, term):
return self._result(
self._get(self._url("/images/search"), params={'term': term}),
True
) | 111,234 |
Inspect changes on a container's filesystem.
Args:
container (str): The container to diff
Returns:
(str)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def diff(self, container):
return self._result(
self._get(self._url("/containers/{0}/changes", container)), True
) | 111,249 |
Identical to the `docker inspect` command, but only for containers.
Args:
container (str): The container to inspect
Returns:
(dict): Similar to the output of `docker inspect`, but as a
single dict
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def inspect_container(self, container):
return self._result(
self._get(self._url("/containers/{0}/json", container)), True
) | 111,252 |
Kill a container or send a signal to a container.
Args:
container (str): The container to kill
signal (str or int): The signal to send. Defaults to ``SIGKILL``
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def kill(self, container, signal=None):
url = self._url("/containers/{0}/kill", container)
params = {}
if signal is not None:
if not isinstance(signal, six.string_types):
signal = int(signal)
params['signal'] = signal
res = self._post(url, params=params)
self._raise_for_status(res) | 111,253 |
Pauses all processes within a container.
Args:
container (str): The container to pause
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def pause(self, container):
url = self._url('/containers/{0}/pause', container)
res = self._post(url)
self._raise_for_status(res) | 111,255 |
Rename a container. Similar to the ``docker rename`` command.
Args:
container (str): ID of the container to rename
name (str): New name for the container
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def rename(self, container, name):
url = self._url("/containers/{0}/rename", container)
params = {'name': name}
res = self._post(url, params=params)
self._raise_for_status(res) | 111,259 |
Resize the tty session.
Args:
container (str or dict): The container to resize
height (int): Height of tty session
width (int): Width of tty session
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def resize(self, container, height, width):
params = {'h': height, 'w': width}
url = self._url("/containers/{0}/resize", container)
res = self._post(url, params=params)
self._raise_for_status(res) | 111,260 |
Restart a container. Similar to the ``docker restart`` command.
Args:
container (str or dict): The container to restart. If a dict, the
``Id`` key is used.
timeout (int): Number of seconds to try to stop for before killing
the container. Once killed it will then be restarted. Default
is 10 seconds.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def restart(self, container, timeout=10):
params = {'t': timeout}
url = self._url("/containers/{0}/restart", container)
conn_timeout = self.timeout
if conn_timeout is not None:
conn_timeout += timeout
res = self._post(url, params=params, timeout=conn_timeout)
self._raise_for_status(res) | 111,261 |
Display the running processes of a container.
Args:
container (str): The container to inspect
ps_args (str): Optional arguments passed to ps (e.g. ``aux``)
Returns:
(str): The output of the top
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def top(self, container, ps_args=None):
u = self._url("/containers/{0}/top", container)
params = {}
if ps_args is not None:
params['ps_args'] = ps_args
return self._result(self._get(u, params=params), True) | 111,264 |
Unpause all processes within a container.
Args:
container (str): The container to unpause | def unpause(self, container):
url = self._url('/containers/{0}/unpause', container)
res = self._post(url)
self._raise_for_status(res) | 111,265 |
Tag this image into a repository. Similar to the ``docker tag``
command.
Args:
repository (str): The repository to set for the tag
tag (str): The tag name
force (bool): Force
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Returns:
(bool): ``True`` if successful | def tag(self, repository, tag=None, **kwargs):
return self.client.api.tag(self.id, repository, tag=tag, **kwargs) | 111,271 |
Pull the image digest.
Args:
platform (str): The platform to pull the image for.
Default: ``None``
Returns:
(:py:class:`Image`): A reference to the pulled image. | def pull(self, platform=None):
repository, _ = parse_repository_tag(self.image_name)
return self.collection.pull(repository, tag=self.id, platform=platform) | 111,273 |
Check whether the given platform identifier is available for this
digest.
Args:
platform (str or dict): A string using the ``os[/arch[/variant]]``
format, or a platform dictionary.
Returns:
(bool): ``True`` if the platform is recognized as available,
``False`` otherwise.
Raises:
:py:class:`docker.errors.InvalidArgument`
If the platform argument is not a valid descriptor. | def has_platform(self, platform):
if platform and not isinstance(platform, dict):
parts = platform.split('/')
if len(parts) > 3 or len(parts) < 1:
raise InvalidArgument(
'"{0}" is not a valid platform descriptor'.format(platform)
)
platform = {'os': parts[0]}
if len(parts) > 2:
platform['variant'] = parts[2]
if len(parts) > 1:
platform['architecture'] = parts[1]
return normalize_platform(
platform, self.client.version()
) in self.attrs['Platforms'] | 111,274 |
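The ``os[/arch[/variant]]`` string form is expanded into a dictionary before it is normalized and compared against ``Platforms``. The same parsing, spelled out for a concrete (illustrative) descriptor:

```python
descriptor = "linux/arm/v7"
parts = descriptor.split('/')
platform = {'os': parts[0]}
if len(parts) > 2:
    platform['variant'] = parts[2]
if len(parts) > 1:
    platform['architecture'] = parts[1]
# platform == {'os': 'linux', 'architecture': 'arm', 'variant': 'v7'}
# "linux/amd64" would give {'os': 'linux', 'architecture': 'amd64'}, and "linux" just {'os': 'linux'}
```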
Gets an image.
Args:
name (str): The name of the image.
Returns:
(:py:class:`Image`): The image.
Raises:
:py:class:`docker.errors.ImageNotFound`
If the image does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error. | def get(self, name):
return self.prepare_model(self.client.api.inspect_image(name)) | 111,277 |
Load an image that was previously saved using
:py:meth:`~docker.models.images.Image.save` (or ``docker save``).
Similar to ``docker load``.
Args:
data (binary): Image data to be loaded.
Returns:
(list of :py:class:`Image`): The images.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def load(self, data):
resp = self.client.api.load_image(data)
images = []
for chunk in resp:
if 'stream' in chunk:
match = re.search(
r'(^Loaded image ID: |^Loaded image: )(.+)$',
chunk['stream']
)
if match:
image_id = match.group(2)
images.append(image_id)
if 'error' in chunk:
raise ImageLoadError(chunk['error'])
return [self.get(i) for i in images] | 111,280 |
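The progress stream from ``docker load`` contains chunks whose ``stream`` field looks like ``Loaded image: busybox:latest`` or ``Loaded image ID: sha256:…``; the regex keeps only the trailing reference. For instance (the chunk text is illustrative):

```python
import re

stream_text = 'Loaded image ID: sha256:1a2b3c4d'
match = re.search(r'(^Loaded image ID: |^Loaded image: )(.+)$', stream_text)
if match:
    print(match.group(2))   # sha256:1a2b3c4d
```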
Like :py:meth:`attach`, but returns the underlying socket-like object
for the HTTP request.
Args:
params (dict): Dictionary of request parameters (e.g. ``stdout``,
``stderr``, ``stream``).
ws (bool): Use websockets instead of raw HTTP.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def attach_socket(self, **kwargs):
return self.client.api.attach_socket(self.id, **kwargs) | 111,291 |
Export the contents of the container's filesystem as a tar archive.
Args:
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
Returns:
(str): The filesystem tar archive
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def export(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
return self.client.api.export(self.id, chunk_size) | 111,294 |
Kill or send a signal to the container.
Args:
signal (str or int): The signal to send. Defaults to ``SIGKILL``
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def kill(self, signal=None):
return self.client.api.kill(self.id, signal=signal) | 111,296 |
Insert a file or folder in this container using a tar archive as
source.
Args:
path (str): Path inside the container where the file(s) will be
extracted. Must exist.
data (bytes): tar data to be extracted
Returns:
(bool): True if the call succeeds.
Raises:
:py:class:`~docker.errors.APIError` If an error occurs. | def put_archive(self, path, data):
return self.client.api.put_archive(self.id, path, data) | 111,298 |
Remove this container. Similar to the ``docker rm`` command.
Args:
v (bool): Remove the volumes associated with the container
link (bool): Remove the specified link and not the underlying
container
force (bool): Force the removal of a running container (uses
``SIGKILL``)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def remove(self, **kwargs):
return self.client.api.remove_container(self.id, **kwargs) | 111,299 |
Rename this container. Similar to the ``docker rename`` command.
Args:
name (str): New name for the container
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def rename(self, name):
return self.client.api.rename(self.id, name) | 111,300 |
Resize the tty session.
Args:
height (int): Height of tty session
width (int): Width of tty session
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def resize(self, height, width):
return self.client.api.resize(self.id, height, width) | 111,301 |
Restart this container. Similar to the ``docker restart`` command.
Args:
timeout (int): Number of seconds to try to stop for before killing
the container. Once killed it will then be restarted. Default
is 10 seconds.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def restart(self, **kwargs):
return self.client.api.restart(self.id, **kwargs) | 111,302 |
Stops a container. Similar to the ``docker stop`` command.
Args:
timeout (int): Timeout in seconds to wait for the container to
stop before sending a ``SIGKILL``. Default: 10
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def stop(self, **kwargs):
return self.client.api.stop(self.id, **kwargs) | 111,305 |
Display the running processes of the container.
Args:
ps_args (str): Optional arguments passed to ps (e.g. ``aux``)
Returns:
(str): The output of the top
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def top(self, **kwargs):
return self.client.api.top(self.id, **kwargs) | 111,306 |
Get a container by name or ID.
Args:
container_id (str): Container name or ID.
Returns:
A :py:class:`Container` object.
Raises:
:py:class:`docker.errors.NotFound`
If the container does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error. | def get(self, container_id):
resp = self.client.api.inspect_container(container_id)
return self.prepare_model(resp) | 111,311 |
List the tasks in this service.
Args:
filters (dict): A map of filters to process on the tasks list.
Valid filters: ``id``, ``name``, ``node``,
``label``, and ``desired-state``.
Returns:
:py:class:`list`: List of task dictionaries.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def tasks(self, filters=None):
if filters is None:
filters = {}
filters['service'] = self.id
return self.client.api.tasks(filters=filters) | 111,317 |
Scale the service's containers. | def scale(self, replicas):
Args:
replicas (int): The number of containers that should be running.
Returns:
bool: ``True`` if successful. | def scale(self, replicas):
if 'Global' in self.attrs['Spec']['Mode'].keys():
raise InvalidArgument('Cannot scale a global container')
service_mode = ServiceMode('replicated', replicas)
return self.client.api.update_service(self.id, self.version,
mode=service_mode,
fetch_current_spec=True) | 111,320 |
List services.
Args:
filters (dict): Filters to process on the nodes list. Valid
filters: ``id``, ``name`` , ``label`` and ``mode``.
Default: ``None``.
Returns:
list of :py:class:`Service`: The services.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def list(self, **kwargs):
return [
self.prepare_model(s)
for s in self.client.api.services(**kwargs)
] | 111,323 |
Disconnect a container from this network.
Args:
container (str): Container to disconnect from this network, as
either an ID, name, or
:py:class:`~docker.models.containers.Container` object.
force (bool): Force the container to disconnect from a network.
Default: ``False``
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def disconnect(self, container, *args, **kwargs):
if isinstance(container, Container):
container = container.id
return self.client.api.disconnect_container_from_network(
container, self.id, *args, **kwargs
) | 111,333 |
Parses authentication entries
Args:
entries: Dict of authentication entries.
raise_on_error: If set to true, an invalid format will raise
InvalidConfigFile
Returns:
Authentication registry. | def parse_auth(cls, entries, raise_on_error=False):
conf = {}
for registry, entry in six.iteritems(entries):
if not isinstance(entry, dict):
log.debug(
'Config entry for key {0} is not auth config'.format(
registry
)
)
# We sometimes fall back to parsing the whole config as if it
# was the auth config by itself, for legacy purposes. In that
# case, we fail silently and return an empty conf if any of the
# keys is not formatted properly.
if raise_on_error:
raise errors.InvalidConfigFile(
'Invalid configuration for registry {0}'.format(
registry
)
)
return {}
if 'identitytoken' in entry:
log.debug(
'Found an IdentityToken entry for registry {0}'.format(
registry
)
)
conf[registry] = {
'IdentityToken': entry['identitytoken']
}
continue # Other values are irrelevant if we have a token
if 'auth' not in entry:
# Starting with engine v1.11 (API 1.23), an empty dictionary is
# a valid value in the auths config.
# https://github.com/docker/compose/issues/3265
log.debug(
'Auth data for {0} is absent. Client might be using a '
'credentials store instead.'.format(registry)
)
conf[registry] = {}
continue
username, password = decode_auth(entry['auth'])
log.debug(
'Found entry (registry={0}, username={1})'
.format(repr(registry), repr(username))
)
conf[registry] = {
'username': username,
'password': password,
'email': entry.get('email'),
'serveraddress': registry,
}
return conf | 111,368 |
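A sketch of the three entry shapes ``parse_auth`` distinguishes: an identity token, a classic base64 ``auth`` field, and an empty entry left to a credentials store. All registry names and values below are made up, and ``decode_auth`` is assumed to split the base64-encoded ``user:password`` pair as usual:

```python
entries = {
    'https://index.docker.io/v1/': {'auth': 'dXNlcjpwYXNz'},      # base64("user:pass")
    'registry.example.com': {'identitytoken': 'abcd1234'},        # token short-circuits the rest
    'ghcr.example.com': {},                                       # deferred to a credentials store
}
# parse_auth(entries) then yields, roughly:
# {
#   'https://index.docker.io/v1/': {'username': 'user', 'password': 'pass',
#                                   'email': None, 'serveraddress': 'https://index.docker.io/v1/'},
#   'registry.example.com': {'IdentityToken': 'abcd1234'},
#   'ghcr.example.com': {},
# }
```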
Return low-level information about an exec command.
Args:
exec_id (str): ID of the exec instance
Returns:
(dict): Dictionary of values returned by the endpoint.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | def exec_inspect(self, exec_id):
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
res = self._get(self._url("/exec/{0}/json", exec_id))
return self._result(res, True) | 111,375 |
Resize the tty session used by the specified exec command.
Args:
exec_id (str): ID of the exec instance
height (int): Height of tty session
width (int): Width of tty session | def exec_resize(self, exec_id, height=None, width=None):
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
params = {'h': height, 'w': width}
url = self._url("/exec/{0}/resize", exec_id)
res = self._post(url, params=params)
self._raise_for_status(res) | 111,376 |
Force a reload of the auth configuration
Args:
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
otherwise ``$HOME/.dockercfg``)
Returns:
None | def reload_config(self, dockercfg_path=None):
self._auth_configs = auth.load_config(
dockercfg_path, credstore_env=self.credstore_env
) | 111,399 |
Returns a coroutine that will resolve once a response arrives.
Args:
message (`Message <telethon.tl.custom.message.Message>` | `int`, optional):
The message (or the message ID) for which a response
is expected. By default this is the last sent message.
timeout (`int` | `float`, optional):
If present, this `timeout` (in seconds) will override the
per-action timeout defined for the conversation. | async def get_response(self, message=None, *, timeout=None):
return await self._get_message(
message, self._response_indices, self._pending_responses, timeout,
lambda x, y: True
) | 112,302 |
Sends a code request to the specified phone number.
Args:
phone (`str` | `int`):
The phone to which the code will be sent.
force_sms (`bool`, optional):
Whether to force sending as SMS.
Returns:
An instance of :tl:`SentCode`. | async def send_code_request(self, phone, *, force_sms=False):
phone = utils.parse_phone(phone) or self._phone
phone_hash = self._phone_code_hash.get(phone)
if not phone_hash:
try:
result = await self(functions.auth.SendCodeRequest(
phone, self.api_id, self.api_hash, types.CodeSettings()))
except errors.AuthRestartError:
return await self.send_code_request(phone, force_sms=force_sms)
self._tos = result.terms_of_service
self._phone_code_hash[phone] = phone_hash = result.phone_code_hash
else:
force_sms = True
self._phone = phone
if force_sms:
result = await self(
functions.auth.ResendCodeRequest(phone, phone_hash))
self._phone_code_hash[phone] = result.phone_code_hash
return result | 112,328 |
Gets "me" (the self user) which is currently authenticated,
or None if the request fails (hence, not authenticated).
Args:
input_peer (`bool`, optional):
Whether to return the :tl:`InputPeerUser` version or the normal
:tl:`User`. This can be useful if you just need to know the ID
of yourself.
Returns:
Your own :tl:`User`. | async def get_me(self, input_peer=False):
if input_peer and self._self_input_peer:
return self._self_input_peer
try:
me = (await self(
functions.users.GetUsersRequest([types.InputUserSelf()])))[0]
self._bot = me.bot
if not self._self_input_peer:
self._self_input_peer = utils.get_input_peer(
me, allow_self=False
)
return self._self_input_peer if input_peer else me
except errors.UnauthorizedError:
return None | 112,386 |
Decorator helper method around `add_event_handler`. Example:
>>> from telethon import TelegramClient, events
>>> client = TelegramClient(...)
>>>
>>> @client.on(events.NewMessage)
... async def handler(event):
... ...
...
>>>
Args:
event (`_EventBuilder` | `type`):
The event builder class or instance to be used,
for instance ``events.NewMessage``. | def on(self, event):
def decorator(f):
self.add_event_handler(f, event)
return f
return decorator | 112,436 |
Creates a new inline result of photo type.
Args:
file (`obj`, optional):
Same as ``file`` for `client.send_file
<telethon.client.uploads.UploadMethods.send_file>`. | async def photo(
self, file, *, id=None,
text=None, parse_mode=(), link_preview=True,
geo=None, period=60, contact=None, game=False, buttons=None
):
try:
fh = utils.get_input_photo(file)
except TypeError:
fh = await self._client.upload_file(file, use_cache=types.InputPhoto)
if not isinstance(fh, types.InputPhoto):
r = await self._client(functions.messages.UploadMediaRequest(
types.InputPeerSelf(), media=types.InputMediaUploadedPhoto(fh)
))
fh = utils.get_input_photo(r.photo)
result = types.InputBotInlineResultPhoto(
id=id or '',
type='photo',
photo=fh,
send_message=await self._message(
text=text or '',
parse_mode=parse_mode,
link_preview=link_preview,
geo=geo,
period=period,
contact=contact,
game=game,
buttons=buttons
)
)
if id is None:
result.id = hashlib.sha256(bytes(result)).hexdigest()
return result | 112,512 |
Creates a new inline result of game type.
Args:
short_name (`str`):
The short name of the game to use. | async def game(
self, short_name, *, id=None,
text=None, parse_mode=(), link_preview=True,
geo=None, period=60, contact=None, game=False, buttons=None
):
result = types.InputBotInlineResultGame(
id=id or '',
short_name=short_name,
send_message=await self._message(
text=text, parse_mode=parse_mode, link_preview=link_preview,
geo=geo, period=period,
contact=contact,
game=game,
buttons=buttons
)
)
if id is None:
result.id = hashlib.sha256(bytes(result)).hexdigest()
return result | 112,514 |
Compile digest auth response
If the qop directive's value is "auth" or "auth-int", then compute the response as follows:
RESPONSE = MD5(HA1:nonce:nonceCount:clientNonce:qop:HA2)
Else if the qop directive is unspecified, then compute the response as follows:
RESPONSE = MD5(HA1:nonce:HA2)
Arguments:
- `credentials`: credentials dict
- `password`: request user password
- `request`: request dict | def response(credentials, password, request):
response = None
algorithm = credentials.get('algorithm')
HA1_value = HA1(
credentials.get('realm'),
credentials.get('username'),
password,
algorithm
)
HA2_value = HA2(credentials, request, algorithm)
if credentials.get('qop') is None:
response = H(b":".join([
HA1_value.encode('utf-8'),
credentials.get('nonce', '').encode('utf-8'),
HA2_value.encode('utf-8')
]), algorithm)
elif credentials.get('qop') == 'auth' or credentials.get('qop') == 'auth-int':
for k in 'nonce', 'nc', 'cnonce', 'qop':
if k not in credentials:
raise ValueError("%s required for response H" % k)
response = H(b":".join([HA1_value.encode('utf-8'),
credentials.get('nonce').encode('utf-8'),
credentials.get('nc').encode('utf-8'),
credentials.get('cnonce').encode('utf-8'),
credentials.get('qop').encode('utf-8'),
HA2_value.encode('utf-8')]), algorithm)
else:
raise ValueError("qop value are wrong")
return response | 114,843 |
302 Redirects n times.
---
tags:
- Redirects
parameters:
- in: path
name: n
type: int
produces:
- text/html
responses:
302:
description: A redirection. | def redirect_n_times(n):
assert n > 0
absolute = request.args.get("absolute", "false").lower() == "true"
if n == 1:
return redirect(url_for("view_get", _external=absolute))
if absolute:
return _redirect("absolute", n, True)
else:
return _redirect("relative", n, False) | 114,852 |
Relatively 302 Redirects n times.
---
tags:
- Redirects
parameters:
- in: path
name: n
type: int
produces:
- text/html
responses:
302:
description: A redirection. | def relative_redirect_n_times(n):
assert n > 0
response = app.make_response("")
response.status_code = 302
if n == 1:
response.headers["Location"] = url_for("view_get")
return response
response.headers["Location"] = url_for("relative_redirect_n_times", n=n - 1)
return response | 114,855 |
Absolutely 302 Redirects n times.
---
tags:
- Redirects
parameters:
- in: path
name: n
type: int
produces:
- text/html
responses:
302:
description: A redirection. | def absolute_redirect_n_times(n):
assert n > 0
if n == 1:
return redirect(url_for("view_get", _external=True))
return _redirect("absolute", n, True) | 114,856 |
Stream n JSON responses
---
tags:
- Dynamic data
parameters:
- in: path
name: n
type: int
produces:
- application/json
responses:
200:
description: Streamed JSON responses. | def stream_n_messages(n):
response = get_dict("url", "args", "headers", "origin")
n = min(n, 100)
def generate_stream():
for i in range(n):
response["id"] = i
yield json.dumps(response) + "\n"
return Response(generate_stream(), headers={"Content-Type": "application/json"}) | 114,857 |
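Because the endpoint emits newline-delimited JSON, a client can consume it incrementally; a hedged sketch using ``requests`` (the host name is illustrative):

```python
import json
import requests

with requests.get("https://httpbin.org/stream/5", stream=True) as resp:
    for line in resp.iter_lines():
        if line:                       # skip keep-alive blank lines
            message = json.loads(line)
            print(message["id"], message["url"])
```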
Return status code or random status code if more than one are given
---
tags:
- Status codes
parameters:
- in: path
name: codes
produces:
- text/plain
responses:
100:
description: Informational responses
200:
description: Success
300:
description: Redirection
400:
description: Client Errors
500:
description: Server Errors | def view_status_code(codes):
if "," not in codes:
try:
code = int(codes)
except ValueError:
return Response("Invalid status code", status=400)
return status_code(code)
choices = []
for choice in codes.split(","):
if ":" not in choice:
code = choice
weight = 1
else:
code, weight = choice.split(":")
try:
choices.append((int(code), float(weight)))
except ValueError:
return Response("Invalid status code", status=400)
code = weighted_choice(choices)
return status_code(code) | 114,858 |
Returns a set of response headers from the query string.
---
tags:
- Response inspection
parameters:
- in: query
name: freeform
explode: true
allowEmptyValue: true
schema:
type: object
additionalProperties:
type: string
style: form
produces:
- application/json
responses:
200:
description: Response headers | def response_headers():
# Pending swaggerUI update
# https://github.com/swagger-api/swagger-ui/issues/3850
headers = MultiDict(request.args.items(multi=True))
response = jsonify(list(headers.lists()))
while True:
original_data = response.data
d = {}
for key in response.headers.keys():
value = response.headers.get_all(key)
if len(value) == 1:
value = value[0]
d[key] = value
response = jsonify(d)
for key, value in headers.items(multi=True):
response.headers.add(key, value)
response_has_changed = response.data != original_data
if not response_has_changed:
break
return response | 114,859 |
Sets a cookie and redirects to cookie list.
---
tags:
- Cookies
parameters:
- in: path
name: name
type: string
- in: path
name: value
type: string
produces:
- text/plain
responses:
200:
description: Set cookies and redirects to cookie list. | def set_cookie(name, value):
r = app.make_response(redirect(url_for("view_cookies")))
r.set_cookie(key=name, value=value, secure=secure_cookie())
return r | 114,861 |