Dataset schema (one row per Python function):

| column | dtype | lengths / values |
|---|---|---|
| repository_name | string | lengths 7-54 |
| func_path_in_repository | string | lengths 4-175 |
| func_name | string | lengths 1-129 |
| whole_func_string | string | lengths 91-50.9k |
| language | string | 1 class: `python` |
| func_code_string | string | lengths 91-50.9k |
| func_code_tokens | sequence | tokenized function code |
| func_documentation_string | string | lengths 1-31.6k |
| func_documentation_tokens | sequence | tokenized documentation |
| split_name | string | 1 class: `train` |
| func_code_url | string | lengths 89-268 |
| score | float64 | 0-0.09 |
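A minimal sketch of iterating rows with this schema. The file name is a hypothetical placeholder; only the column names come from the table above.

```python
# Hypothetical loading sketch; "data.jsonl" is a placeholder path,
# only the column names are taken from the schema above.
import json

with open("data.jsonl") as f:          # one JSON object per row
    for line in f:
        row = json.loads(line)
        # each row pairs a Python function with its docstring and metadata
        print(row["repository_name"], row["func_name"], row["score"])
        print(row["func_code_url"])
```
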
**idlesign/uwsgiconf** · `uwsgiconf/options/routing.py` · `Routing.set_error_pages`
language: python | split: train | score: 0.003293
func_code_url: https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/options/routing.py#L332-L357

```python
def set_error_pages(self, codes_map=None, common_prefix=None):
    """Add error pages for managed 403, 404, 500 responses.

    Shortcut for ``.set_error_page()``.

    :param dict codes_map: Status code mapped into an html filepath or
        just a filename if common_prefix is used.
        If not set, a filename containing the status code is presumed:
        403.html, 404.html, 500.html.

    :param str|unicode common_prefix: Common path (prefix) for all files.

    """
    statuses = [403, 404, 500]

    if common_prefix:
        if not codes_map:
            codes_map = {code: '%s.html' % code for code in statuses}

        for code, filename in codes_map.items():
            codes_map[code] = os.path.join(common_prefix, filename)

    for code, filepath in codes_map.items():
        self.set_error_page(code, filepath)

    return self._section
```

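A short usage sketch for the entry above. The `Section` import path and the `routing` attribute are assumptions inferred from the `Routing` option-group class name; only `set_error_pages` itself comes from the row.

```python
# Sketch only: `Section` and its `routing` attribute are assumed here,
# based on the Routing option-group class shown above.
from uwsgiconf.config import Section

section = Section()

# Use 403.html, 404.html and 500.html from a common directory.
section.routing.set_error_pages(common_prefix='/srv/www/errors')

# Or map status codes to explicit files.
section.routing.set_error_pages({404: '/srv/www/errors/missing.html'})
```
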
**jobovy/galpy** · `galpy/potential/FerrersPotential.py` · `FerrersPotential._R2deriv`
language: python | split: train | score: 0.028836
func_code_url: https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/FerrersPotential.py#L229-L255

```python
def _R2deriv(self, R, z, phi=0., t=0.):
    """
    NAME:
       _R2deriv
    PURPOSE:
       evaluate the second radial derivative for this potential
    INPUT:
       R - Galactocentric cylindrical radius
       z - vertical height
       phi - azimuth
       t - time
    OUTPUT:
       the second radial derivative
    """
    if not self.isNonAxi:
        phi = 0.
    x, y, z = self._compute_xyz(R, phi, z, t)
    phixxa = self._2ndderiv_xyz(x, y, z, 0, 0)
    phixya = self._2ndderiv_xyz(x, y, z, 0, 1)
    phiyya = self._2ndderiv_xyz(x, y, z, 1, 1)
    ang = self._omegab*t + self._pa
    c, s = np.cos(ang), np.sin(ang)
    phixx = c**2*phixxa + 2.*c*s*phixya + s**2*phiyya
    phixy = (c**2 - s**2)*phixya + c*s*(phiyya - phixxa)
    phiyy = s**2*phixxa - 2.*c*s*phixya + c**2*phiyya
    return np.cos(phi)**2.*phixx + np.sin(phi)**2.*phiyy + \
        2.*np.cos(phi)*np.sin(phi)*phixy
```

**lenzenmi/asyncio_dispatch** · `asyncio_dispatch/dispatcher.py` · `Signal.connect`
language: python | split: train | score: 0.006512
func_code_url: https://github.com/lenzenmi/asyncio_dispatch/blob/9c8e7fba65c69ba146e6559ec7897bdadb24880a/asyncio_dispatch/dispatcher.py#L55-L102

```python
def connect(self, callback, sender=None, senders=None, key=None, keys=None, weak=True):
    '''
    *This method is a coroutine.*

    Connect a callback. By default, the callback will be called any time
    :meth:`asyncio_dispatch.Signal.send` is called. You may restrict this by adding ``senders``
    and/or ``keys`` which will cause the connected callback to only be called when
    :meth:`asyncio_dispatch.Signal.send` is called with a matching ``sender`` and/or ``key``.

    The difference between a ``sender`` and a ``key`` is that a ``sender`` refers to a specific
    object's location in memory and uses :func:`builtins.id` for comparison, while a ``key``
    uses :func:`builtins.hash` for comparison. Thus two strings of the same value will always
    be equal when used as a ``key`` but may be seen as different objects when used as a
    ``sender``.

    :param callback: A callable to be called when the signal is sent
    :param Object sender: Any python object. Connects the callback to a single ``sender``.
    :param list senders: An iterable of ``sender`` objects. Connects the callback against
        multiple ``senders``.
    :param str key: Any object that can be used as a key in a python dictionary. Connects the
        callback against a single ``key``.
    :param list keys: An iterable of ``key`` objects. Connects the callback against multiple
        ``keys``.
    :param weak: If ``True``, the callback will be stored as a weakreference. If a long-lived
        reference is required, use ``False``.
    '''
    weak_callback = yield from self._get_ref(callback, weak)

    # dispatch
    if (sender is None) and (senders is None) and (key is None) and (keys is None):
        # subscribe: always activate the callback when the signal is sent
        with (yield from self._lock_all):
            self._all.add(weak_callback)
    else:
        if sender is not None:
            yield from self._add_sender(sender, weak_callback)

        if senders is not None:
            for sender in senders:
                yield from self._add_sender(sender, weak_callback)

        if key is not None:
            yield from self._add_key(key, weak_callback)

        if keys is not None:
            for key in keys:
                yield from self._add_key(key, weak_callback)
```

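A minimal connect/send sketch for the coroutine above. Only the `connect()` signature comes from the row; the default `Signal()` construction, a symmetric `send()` counterpart, and the callback keyword arguments are assumptions.

```python
import asyncio
from asyncio_dispatch import Signal   # import path assumed


@asyncio.coroutine
def main():
    signal = Signal()                 # default construction assumed

    @asyncio.coroutine
    def on_event(**kwargs):
        # asyncio_dispatch is assumed to call back with keyword arguments
        print('signal received:', kwargs)

    # call on_event only when the signal is sent with key='saved'
    yield from signal.connect(on_event, key='saved', weak=False)
    # send() with matching kwargs is assumed symmetric to connect()
    yield from signal.send(key='saved')


loop = asyncio.get_event_loop()
loop.run_until_complete(main())
```
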
**gpennington/PyMarvel** · `marvel/marvel.py` · `Marvel.get_characters`
language: python | split: train | score: 0.007732
func_code_url: https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L106-L128

```python
def get_characters(self, *args, **kwargs):
    """Fetches lists of comic characters with optional filters.

    get /v1/public/characters/{characterId}

    :returns: CharacterDataWrapper

    >>> m = Marvel(public_key, private_key)
    >>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15")
    >>> print cdw.data.count
    1401
    >>> for result in cdw.data.results:
    ...     print result.name
    Aginar
    Air-Walker (Gabriel Lan)
    Ajak
    Ajaxis
    Akemi
    """
    # pass url string and params string to _call
    response = json.loads(self._call(Character.resource_url(), self._params(kwargs)).text)
    return CharacterDataWrapper(self, response, kwargs)
```

**pyusb/pyusb** · `usb/legacy.py` · `DeviceHandle.bulkRead`
language: python | split: train | score: 0.009501
func_code_url: https://github.com/pyusb/pyusb/blob/ffe6faf42c6ad273880b0b464b9bbf44c1d4b2e9/usb/legacy.py#L143-L152

```python
def bulkRead(self, endpoint, size, timeout=100):
    r"""Performs a bulk read request to the endpoint specified.

    Arguments:
        endpoint: endpoint number.
        size: number of bytes to read.
        timeout: operation timeout in milliseconds. (default: 100)

    Returns a tuple with the data read.
    """
    return self.dev.read(endpoint, size, timeout)
```

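A usage sketch against the legacy PyUSB API. The discovery idiom (`usb.busses()`, `device.open()`) is the legacy convention and is an assumption here; the endpoint and size values are illustrative.

```python
import usb  # legacy PyUSB 0.x-style API (provided as usb.legacy in PyUSB 1.x)

# Find the first available device (discovery idiom assumed, not from the row).
devices = [dev for bus in usb.busses() for dev in bus.devices]
if devices:
    handle = devices[0].open()        # returns a DeviceHandle
    # Read up to 64 bytes from bulk IN endpoint 0x81 (example values).
    data = handle.bulkRead(0x81, 64, timeout=1000)
    print(data)                       # tuple of bytes read
```
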
**camptocamp/Studio** · `studio/controllers/datastores.py` · `DatastoresController.update`
language: python | split: train | score: 0.007407
func_code_url: https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/controllers/datastores.py#L67-L78

```python
def update(self, id):
    """PUT /datastores/id: Update an existing item."""
    # url('DataStores', id=ID)
    content = request.environ['wsgi.input'].read(int(request.environ['CONTENT_LENGTH']))
    content = content.decode('utf8')
    content = simplejson.loads(content)
    result = meta.Session.query(DataStore).get(id)
    result.name = content['name']
    result.type = content['type']
    result.ogrstring = content['ogrstring']
    meta.Session.commit()
    response.status = 201
```

**openstack/pymod2pkg** · `pymod2pkg/__init__.py` · `default_ubuntu_tr`
language: python | split: train | score: 0.004405
func_code_url: https://github.com/openstack/pymod2pkg/blob/f9a2f02fbfa0b2cfcdb4a7494c9ddbd10859065a/pymod2pkg/__init__.py#L87-L94

```python
def default_ubuntu_tr(mod):
    """
    Default translation function for Ubuntu based systems
    """
    pkg = 'python-%s' % mod.lower()
    py2pkg = pkg
    py3pkg = 'python3-%s' % mod.lower()
    return (pkg, py2pkg, py3pkg)
```

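The function is self-contained, so a usage sketch needs no assumptions beyond the import (the function lives in the package's `__init__.py` per the path above):

```python
from pymod2pkg import default_ubuntu_tr

pkg, py2pkg, py3pkg = default_ubuntu_tr('SQLAlchemy')
print(pkg)     # python-sqlalchemy
print(py2pkg)  # python-sqlalchemy
print(py3pkg)  # python3-sqlalchemy
```
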
**openego/eTraGo** · `etrago/tools/utilities.py` · `set_line_costs`
language: python | split: train | score: 0.006157
func_code_url: https://github.com/openego/eTraGo/blob/2a8fc6d4368d0e9abe6fe0d0c39baf66ea0126b9/etrago/tools/utilities.py#L1158-L1199

```python
def set_line_costs(network, args,
                   cost110=230, cost220=290, cost380=85, costDC=375):
    """ Set capital costs for extendable lines in respect to PyPSA [€/MVA]

    Parameters
    ----------
    network : :class:`pypsa.Network`
        Overall container of PyPSA
    args : dict containing settings from appl.py
    cost110 : capital costs per km for 110kV lines and cables
        default: 230€/MVA/km, source: costs for extra circuit in
        dena Verteilnetzstudie, p. 146
    cost220 : capital costs per km for 220kV lines and cables
        default: 290€/MVA/km, source: costs for extra circuit in
        NEP 2025, capacity from most used 220 kV lines in model
    cost380 : capital costs per km for 380kV lines and cables
        default: 85€/MVA/km, source: costs for extra circuit in
        NEP 2025, capacity from most used 380 kV lines in NEP
    costDC : capital costs per km for DC-lines
        default: 375€/MVA/km, source: costs for DC transmission line
        in NEP 2035
    -------
    """
    network.lines["v_nom"] = network.lines.bus0.map(network.buses.v_nom)

    network.lines.loc[(network.lines.v_nom == 110),
                      'capital_cost'] = cost110 * network.lines.length /\
        args['branch_capacity_factor']['HV']

    network.lines.loc[(network.lines.v_nom == 220),
                      'capital_cost'] = cost220 * network.lines.length /\
        args['branch_capacity_factor']['eHV']

    network.lines.loc[(network.lines.v_nom == 380),
                      'capital_cost'] = cost380 * network.lines.length /\
        args['branch_capacity_factor']['eHV']

    network.links.loc[network.links.p_nom_extendable,
                      'capital_cost'] = costDC * network.links.length

    return network
```

PmagPy/PmagPy | programs/thellier_magic.py | main | def main():
"""
NAME
thellier_magic.py
DESCRIPTION
plots Thellier-Thellier data in version 3.0 format
Reads saved interpretations from a specimen formatted table, default: specimens.txt
SYNTAX
thellier_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f MEAS, set measurements input file, default is 'measurements.txt'
-WD: directory to output files to (default : current directory)
Note: if using Windows, all figures will output to current directory
-ID: directory to read files from (default : same as -WD)
-fsp PRIOR, set specimens.txt prior interpretations file, default is 'specimens.txt'
-fmt [svg,png,jpg], format for images - default is svg
-sav, saves plots without review (in format specified by -fmt key or default)
-spc SPEC, plots single specimen SPEC, saves plot with specified format
with optional -b bounds and quits
-n SPECIMENS, number of specimens to plot
OUTPUT
figures:
ALL: numbers refer to temperature steps in command line window
1) Arai plot: closed circles are zero-field first/infield
open circles are infield first/zero-field
triangles are pTRM checks
squares are pTRM tail checks
VDS is vector difference sum
diamonds are bounds for interpretation
2) Zijderveld plot: closed (open) symbols are X-Y (X-Z) planes
X rotated to NRM direction
3) (De/Re)Magnetization diagram:
circles are NRM remaining
squares are pTRM gained
4) equal area projections:
green triangles are pTRM gained direction
red (purple) circles are lower(upper) hemisphere of ZI step directions
blue (cyan) squares are lower(upper) hemisphere IZ step directions
5) Optional: TRM acquisition
6) Optional: TDS normalization
command line window:
list is: temperature step numbers, temperatures (C), Dec, Inc, Int (units of measuements)
list of possible commands: type letter followed by return to select option
saving of plots creates image files with specimen, plot type as name
"""
#
# parse command line options
#
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
dir_path = pmag.get_named_arg("-WD", default_val=".")
input_dir_path = pmag.get_named_arg('-ID', "")
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
meas_file = pmag.get_named_arg(
"-f", default_val="measurements.txt")
#spec_file = pmag.get_named_arg(
# "-fsp", default_val="specimens.txt")
#crit_file = pmag.get_named_arg("-fcr", default_val="criteria.txt")
#spec_file = os.path.join(dir_path, spec_file)
#crit_file = os.path.join(dir_path, crit_file)
meas_file = pmag.resolve_file_name(meas_file, input_dir_path)
fmt = pmag.get_named_arg("-fmt", "svg")
save_plots = False
interactive = True
if '-sav' in sys.argv:
save_plots = True
interactive=False
spec = pmag.get_named_arg("-spc", default_val="")
n_specs = pmag.get_named_arg("-n", default_val="all")
try:
n_specs = int(n_specs)
except ValueError:
pass
ipmag.thellier_magic(meas_file, dir_path, input_dir_path,
spec, n_specs, save_plots, fmt, interactive) | python | def main():
"""
NAME
thellier_magic.py
DESCRIPTION
plots Thellier-Thellier data in version 3.0 format
Reads saved interpretations from a specimen formatted table, default: specimens.txt
SYNTAX
thellier_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f MEAS, set measurements input file, default is 'measurements.txt'
-WD: directory to output files to (default : current directory)
Note: if using Windows, all figures will output to current directory
-ID: directory to read files from (default : same as -WD)
-fsp PRIOR, set specimens.txt prior interpretations file, default is 'specimens.txt'
-fmt [svg,png,jpg], format for images - default is svg
-sav, saves plots without review (in format specified by -fmt key or default)
-spc SPEC, plots single specimen SPEC, saves plot with specified format
with optional -b bounds and quits
-n SPECIMENS, number of specimens to plot
OUTPUT
figures:
ALL: numbers refer to temperature steps in command line window
1) Arai plot: closed circles are zero-field first/infield
open circles are infield first/zero-field
triangles are pTRM checks
squares are pTRM tail checks
VDS is vector difference sum
diamonds are bounds for interpretation
2) Zijderveld plot: closed (open) symbols are X-Y (X-Z) planes
X rotated to NRM direction
3) (De/Re)Magnetization diagram:
circles are NRM remaining
squares are pTRM gained
4) equal area projections:
green triangles are pTRM gained direction
red (purple) circles are lower(upper) hemisphere of ZI step directions
blue (cyan) squares are lower(upper) hemisphere IZ step directions
5) Optional: TRM acquisition
6) Optional: TDS normalization
command line window:
list is: temperature step numbers, temperatures (C), Dec, Inc, Int (units of measuements)
list of possible commands: type letter followed by return to select option
saving of plots creates image files with specimen, plot type as name
"""
#
# parse command line options
#
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
dir_path = pmag.get_named_arg("-WD", default_val=".")
input_dir_path = pmag.get_named_arg('-ID', "")
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
meas_file = pmag.get_named_arg(
"-f", default_val="measurements.txt")
#spec_file = pmag.get_named_arg(
# "-fsp", default_val="specimens.txt")
#crit_file = pmag.get_named_arg("-fcr", default_val="criteria.txt")
#spec_file = os.path.join(dir_path, spec_file)
#crit_file = os.path.join(dir_path, crit_file)
meas_file = pmag.resolve_file_name(meas_file, input_dir_path)
fmt = pmag.get_named_arg("-fmt", "svg")
save_plots = False
interactive = True
if '-sav' in sys.argv:
save_plots = True
interactive=False
spec = pmag.get_named_arg("-spc", default_val="")
n_specs = pmag.get_named_arg("-n", default_val="all")
try:
n_specs = int(n_specs)
except ValueError:
pass
ipmag.thellier_magic(meas_file, dir_path, input_dir_path,
spec, n_specs, save_plots, fmt, interactive) | [
"def",
"main",
"(",
")",
":",
"#",
"# parse command line options",
"#",
"if",
"'-h'",
"in",
"sys",
".",
"argv",
":",
"print",
"(",
"main",
".",
"__doc__",
")",
"sys",
".",
"exit",
"(",
")",
"dir_path",
"=",
"pmag",
".",
"get_named_arg",
"(",
"\"-WD\"",
",",
"default_val",
"=",
"\".\"",
")",
"input_dir_path",
"=",
"pmag",
".",
"get_named_arg",
"(",
"'-ID'",
",",
"\"\"",
")",
"input_dir_path",
",",
"dir_path",
"=",
"pmag",
".",
"fix_directories",
"(",
"input_dir_path",
",",
"dir_path",
")",
"meas_file",
"=",
"pmag",
".",
"get_named_arg",
"(",
"\"-f\"",
",",
"default_val",
"=",
"\"measurements.txt\"",
")",
"#spec_file = pmag.get_named_arg(",
"# \"-fsp\", default_val=\"specimens.txt\")",
"#crit_file = pmag.get_named_arg(\"-fcr\", default_val=\"criteria.txt\")",
"#spec_file = os.path.join(dir_path, spec_file)",
"#crit_file = os.path.join(dir_path, crit_file)",
"meas_file",
"=",
"pmag",
".",
"resolve_file_name",
"(",
"meas_file",
",",
"input_dir_path",
")",
"fmt",
"=",
"pmag",
".",
"get_named_arg",
"(",
"\"-fmt\"",
",",
"\"svg\"",
")",
"save_plots",
"=",
"False",
"interactive",
"=",
"True",
"if",
"'-sav'",
"in",
"sys",
".",
"argv",
":",
"save_plots",
"=",
"True",
"interactive",
"=",
"False",
"spec",
"=",
"pmag",
".",
"get_named_arg",
"(",
"\"-spc\"",
",",
"default_val",
"=",
"\"\"",
")",
"n_specs",
"=",
"pmag",
".",
"get_named_arg",
"(",
"\"-n\"",
",",
"default_val",
"=",
"\"all\"",
")",
"try",
":",
"n_specs",
"=",
"int",
"(",
"n_specs",
")",
"except",
"ValueError",
":",
"pass",
"ipmag",
".",
"thellier_magic",
"(",
"meas_file",
",",
"dir_path",
",",
"input_dir_path",
",",
"spec",
",",
"n_specs",
",",
"save_plots",
",",
"fmt",
",",
"interactive",
")"
] | NAME
thellier_magic.py
DESCRIPTION
plots Thellier-Thellier data in version 3.0 format
Reads saved interpretations from a specimen formatted table, default: specimens.txt
SYNTAX
thellier_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f MEAS, set measurements input file, default is 'measurements.txt'
-WD: directory to output files to (default : current directory)
Note: if using Windows, all figures will output to current directory
-ID: directory to read files from (default : same as -WD)
-fsp PRIOR, set specimens.txt prior interpretations file, default is 'specimens.txt'
-fmt [svg,png,jpg], format for images - default is svg
-sav, saves plots without review (in format specified by -fmt key or default)
-spc SPEC, plots single specimen SPEC, saves plot with specified format
with optional -b bounds and quits
-n SPECIMENS, number of specimens to plot
OUTPUT
figures:
ALL: numbers refer to temperature steps in command line window
1) Arai plot: closed circles are zero-field first/infield
open circles are infield first/zero-field
triangles are pTRM checks
squares are pTRM tail checks
VDS is vector difference sum
diamonds are bounds for interpretation
2) Zijderveld plot: closed (open) symbols are X-Y (X-Z) planes
X rotated to NRM direction
3) (De/Re)Magnetization diagram:
circles are NRM remaining
squares are pTRM gained
4) equal area projections:
green triangles are pTRM gained direction
red (purple) circles are lower(upper) hemisphere of ZI step directions
blue (cyan) squares are lower(upper) hemisphere IZ step directions
5) Optional: TRM acquisition
6) Optional: TDS normalization
command line window:
list is: temperature step numbers, temperatures (C), Dec, Inc, Int (units of measuements)
list of possible commands: type letter followed by return to select option
saving of plots creates image files with specimen, plot type as name | [
"NAME",
"thellier_magic",
".",
"py"
] | train | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/thellier_magic.py#L18-L100 | 0.004055 |
google/prettytensor | prettytensor/pretty_tensor_class.py | _DeferredLayer.construct | def construct(self, **bindings):
"""Constructs the graph and returns either a tensor or a sequence.
Args:
**bindings: Arguments for every deferred parameter.
Returns:
The value that is placed into this.
"""
context = _assign_values_to_unbound_vars(self._unbound_vars, bindings)
context.update(self._partial_context)
return self._construct(context) | python | def construct(self, **bindings):
"""Constructs the graph and returns either a tensor or a sequence.
**PyGithub/PyGithub** · `github/AuthenticatedUser.py` · `AuthenticatedUser.remove_from_watched`
language: python | split: train | score: 0.006211
func_code_url: https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/AuthenticatedUser.py#L1129-L1139

```python
def remove_from_watched(self, watched):
    """
    :calls: `DELETE /repos/:owner/:repo/subscription <http://developer.github.com/v3/activity/watching>`_
    :param watched: :class:`github.Repository.Repository`
    :rtype: None
    """
    assert isinstance(watched, github.Repository.Repository), watched
    headers, data = self._requester.requestJsonAndCheck(
        "DELETE",
        "/repos/" + watched._identity + "/subscription"
    )
```

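A usage sketch for the method above. `Github`, `get_user()` and `get_repo()` are standard PyGithub entry points; the token and repository name are placeholders.

```python
from github import Github

g = Github("<access-token>")            # placeholder token
user = g.get_user()                     # the AuthenticatedUser
repo = g.get_repo("PyGithub/PyGithub")  # example repository

user.remove_from_watched(repo)          # stop watching the repository
```
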
**xtrementl/focus** · `focus/daemon.py` · `TaskRunner._run_events`
language: python | split: train | score: 0.002766
func_code_url: https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/daemon.py#L485-L506

```python
def _run_events(self, shutdown=False):
    """ Runs event hooks for registered event plugins.

        `shutdown`
            Set to ``True`` to run task_end events;
            otherwise, run task_run events.
        """

    # run task_start events, if not ran already
    if not self._ran_taskstart:
        self._ran_taskstart = True
        registration.run_event_hooks('task_start', self._task)

    # run events
    event = 'task_end' if shutdown else 'task_run'
    registration.run_event_hooks(event, self._task)

    # reclaim any subprocesses plugins may have forked
    try:
        os.waitpid(-1, os.P_NOWAIT)
    except OSError:
        pass
```

**ninuxorg/nodeshot** · `nodeshot/community/participation/models/vote.py` · `Vote.clean`
language: python | split: train | score: 0.002959
func_code_url: https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/community/participation/models/vote.py#L54-L68

```python
def clean(self, *args, **kwargs):
    """
    Check if votes can be inserted for parent node or parent layer
    """
    if not self.pk:
        # ensure voting for this node is allowed
        if self.node.participation_settings.voting_allowed is not True:
            raise ValidationError("Voting not allowed for this node")

        if 'nodeshot.core.layers' in settings.INSTALLED_APPS:
            layer = self.node.layer
            # ensure voting for this layer is allowed
            if layer.participation_settings.voting_allowed is not True:
                raise ValidationError("Voting not allowed for this layer")
```

**blackecho/Deep-Learning-TensorFlow** · `yadlt/models/convolutional/conv_net.py` · `ConvolutionalNetwork._create_layers`
language: python | split: train | score: 0.000421
func_code_url: https://github.com/blackecho/Deep-Learning-TensorFlow/blob/ddeb1f2848da7b7bee166ad2152b4afc46bb2086/yadlt/models/convolutional/conv_net.py#L131-L258

```python
def _create_layers(self, n_classes):
    """Create the layers of the model from self.layers.

    :param n_classes: number of classes
    :return: self
    """
    next_layer_feed = tf.reshape(self.input_data,
                                 [-1, self.original_shape[0],
                                  self.original_shape[1],
                                  self.original_shape[2]])
    prev_output_dim = self.original_shape[2]
    # this flag indicates whether we are building the first dense layer
    first_full = True

    self.W_vars = []
    self.B_vars = []

    for i, l in enumerate(self.layers.split(',')):

        node = l.split('-')
        node_type = node[0]

        if node_type == 'conv2d':

            # ################### #
            # Convolutional Layer #
            # ################### #

            # fx, fy = shape of the convolutional filter
            # feature_maps = number of output dimensions
            fx, fy, feature_maps, stride = int(node[1]),\
                int(node[2]), int(node[3]), int(node[4])

            print('Building Convolutional layer with %d input channels\
 and %d %dx%d filters with stride %d' %
                  (prev_output_dim, feature_maps, fx, fy, stride))

            # Create weights and biases
            W_conv = self.weight_variable(
                [fx, fy, prev_output_dim, feature_maps])
            b_conv = self.bias_variable([feature_maps])
            self.W_vars.append(W_conv)
            self.B_vars.append(b_conv)

            # Convolution and Activation function
            h_conv = tf.nn.relu(
                self.conv2d(next_layer_feed, W_conv, stride) + b_conv)

            # keep track of the number of output dims of the previous layer
            prev_output_dim = feature_maps
            # output node of the last layer
            next_layer_feed = h_conv

        elif node_type == 'maxpool':

            # ################# #
            # Max Pooling Layer #
            # ################# #

            ksize = int(node[1])

            print('Building Max Pooling layer with size %d' % ksize)

            next_layer_feed = self.max_pool(next_layer_feed, ksize)

        elif node_type == 'full':

            # ####################### #
            # Densely Connected Layer #
            # ####################### #

            if first_full:  # first fully connected layer

                dim = int(node[1])
                shp = next_layer_feed.get_shape()
                tmpx = shp[1].value
                tmpy = shp[2].value
                fanin = tmpx * tmpy * prev_output_dim

                print('Building fully connected layer with %d in units\
 and %d out units' % (fanin, dim))

                W_fc = self.weight_variable([fanin, dim])
                b_fc = self.bias_variable([dim])
                self.W_vars.append(W_fc)
                self.B_vars.append(b_fc)

                h_pool_flat = tf.reshape(next_layer_feed, [-1, fanin])
                h_fc = tf.nn.relu(tf.add(
                    tf.matmul(h_pool_flat, W_fc),
                    b_fc))
                h_fc_drop = tf.nn.dropout(h_fc, self.keep_prob)

                prev_output_dim = dim
                next_layer_feed = h_fc_drop

                first_full = False

            else:  # not first fully connected layer

                dim = int(node[1])
                W_fc = self.weight_variable([prev_output_dim, dim])
                b_fc = self.bias_variable([dim])
                self.W_vars.append(W_fc)
                self.B_vars.append(b_fc)

                h_fc = tf.nn.relu(tf.add(
                    tf.matmul(next_layer_feed, W_fc), b_fc))
                h_fc_drop = tf.nn.dropout(h_fc, self.keep_prob)

                prev_output_dim = dim
                next_layer_feed = h_fc_drop

        elif node_type == 'softmax':

            # ############# #
            # Softmax Layer #
            # ############# #

            print('Building softmax layer with %d in units and\
 %d out units' % (prev_output_dim, n_classes))

            W_sm = self.weight_variable([prev_output_dim, n_classes])
            b_sm = self.bias_variable([n_classes])
            self.W_vars.append(W_sm)
            self.B_vars.append(b_sm)

            self.mod_y = tf.add(tf.matmul(next_layer_feed, W_sm), b_sm)
```

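The `self.layers` string parsed above encodes the whole architecture; per the parser, the fields are `conv2d-fx-fy-maps-stride`, `maxpool-ksize`, `full-units` and `softmax`. A spec string the parser accepts (the values themselves are illustrative):

```python
# Two conv/pool stages, one dense layer, and a softmax output.
layers = 'conv2d-5-5-32-1,maxpool-2,conv2d-5-5-64-1,maxpool-2,full-1024,softmax'
```
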
OSSOS/MOP | src/jjk/preproc/MOPdisplay.py | mark | def mark(x,y,label=None):
"""Mark a circle on the current image"""
if label is not None:
os.system("xpaset -p ds9 regions color red ")
cmd="echo 'image; text %d %d # text={%s}' | xpaset ds9 regions " % ( x,y,label)
else:
os.system("xpaset -p ds9 regions color blue")
cmd="echo 'image; circle %d %d 10 ' | xpaset ds9 regions " % (x,y)
os.system(cmd)
    return | python | [
"def",
"mark",
"(",
"x",
",",
"y",
",",
"label",
"=",
"None",
")",
":",
"if",
"label",
"is",
"not",
"None",
":",
"os",
".",
"system",
"(",
"\"xpaset -p ds9 regions color red \"",
")",
"cmd",
"=",
"\"echo 'image; text %d %d # text={%s}' | xpaset ds9 regions \"",
"%",
"(",
"x",
",",
"y",
",",
"label",
")",
"else",
":",
"os",
".",
"system",
"(",
"\"xpaset -p ds9 regions color blue\"",
")",
"cmd",
"=",
"\"echo 'image; circle %d %d 10 ' | xpaset ds9 regions \"",
"%",
"(",
"x",
",",
"y",
")",
"os",
".",
"system",
"(",
"cmd",
")",
"return"
] | Mark a circle on the current image | [
"Mark",
"a",
"circle",
"on",
"the",
"current",
"image"
] | train | https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/jjk/preproc/MOPdisplay.py#L72-L81 | 0.065445 |
benvanwerkhoven/kernel_tuner | kernel_tuner/strategies/minimize.py | _cost_func | def _cost_func(x, kernel_options, tuning_options, runner, results, cache):
""" Cost function used by minimize """
error_time = 1e20
logging.debug('_cost_func called')
logging.debug('x: ' + str(x))
x_key = ",".join([str(i) for i in x])
if x_key in cache:
return cache[x_key]
#snap values in x to nearest actual value for each parameter unscale x if needed
if tuning_options.scaling:
params = unscale_and_snap_to_nearest(x, tuning_options.tune_params, tuning_options.eps)
else:
params = snap_to_nearest_config(x, tuning_options.tune_params)
logging.debug('params ' + str(params))
x_int = ",".join([str(i) for i in params])
if x_int in cache:
return cache[x_int]
#check if this is a legal (non-restricted) parameter instance
if tuning_options.restrictions:
legal = util.check_restrictions(tuning_options.restrictions, params, tuning_options.tune_params.keys(), tuning_options.verbose)
if not legal:
cache[x_int] = error_time
cache[x_key] = error_time
return error_time
#compile and benchmark this instance
res, _ = runner.run([params], kernel_options, tuning_options)
#append to tuning results
if res:
results.append(res[0])
cache[x_int] = res[0]['time']
cache[x_key] = res[0]['time']
return res[0]['time']
cache[x_int] = error_time
cache[x_key] = error_time
    return error_time | python | [
"def",
"_cost_func",
"(",
"x",
",",
"kernel_options",
",",
"tuning_options",
",",
"runner",
",",
"results",
",",
"cache",
")",
":",
"error_time",
"=",
"1e20",
"logging",
".",
"debug",
"(",
"'_cost_func called'",
")",
"logging",
".",
"debug",
"(",
"'x: '",
"+",
"str",
"(",
"x",
")",
")",
"x_key",
"=",
"\",\"",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"x",
"]",
")",
"if",
"x_key",
"in",
"cache",
":",
"return",
"cache",
"[",
"x_key",
"]",
"#snap values in x to nearest actual value for each parameter unscale x if needed",
"if",
"tuning_options",
".",
"scaling",
":",
"params",
"=",
"unscale_and_snap_to_nearest",
"(",
"x",
",",
"tuning_options",
".",
"tune_params",
",",
"tuning_options",
".",
"eps",
")",
"else",
":",
"params",
"=",
"snap_to_nearest_config",
"(",
"x",
",",
"tuning_options",
".",
"tune_params",
")",
"logging",
".",
"debug",
"(",
"'params '",
"+",
"str",
"(",
"params",
")",
")",
"x_int",
"=",
"\",\"",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"params",
"]",
")",
"if",
"x_int",
"in",
"cache",
":",
"return",
"cache",
"[",
"x_int",
"]",
"#check if this is a legal (non-restricted) parameter instance",
"if",
"tuning_options",
".",
"restrictions",
":",
"legal",
"=",
"util",
".",
"check_restrictions",
"(",
"tuning_options",
".",
"restrictions",
",",
"params",
",",
"tuning_options",
".",
"tune_params",
".",
"keys",
"(",
")",
",",
"tuning_options",
".",
"verbose",
")",
"if",
"not",
"legal",
":",
"cache",
"[",
"x_int",
"]",
"=",
"error_time",
"cache",
"[",
"x_key",
"]",
"=",
"error_time",
"return",
"error_time",
"#compile and benchmark this instance",
"res",
",",
"_",
"=",
"runner",
".",
"run",
"(",
"[",
"params",
"]",
",",
"kernel_options",
",",
"tuning_options",
")",
"#append to tuning results",
"if",
"res",
":",
"results",
".",
"append",
"(",
"res",
"[",
"0",
"]",
")",
"cache",
"[",
"x_int",
"]",
"=",
"res",
"[",
"0",
"]",
"[",
"'time'",
"]",
"cache",
"[",
"x_key",
"]",
"=",
"res",
"[",
"0",
"]",
"[",
"'time'",
"]",
"return",
"res",
"[",
"0",
"]",
"[",
"'time'",
"]",
"cache",
"[",
"x_int",
"]",
"=",
"error_time",
"cache",
"[",
"x_key",
"]",
"=",
"error_time",
"return",
"error_time"
] | Cost function used by minimize | [
"Cost",
"function",
"used",
"by",
"minimize"
] | train | https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/minimize.py#L60-L103 | 0.005398 |
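A minimal sketch of the double-keyed cache used by _cost_func above: both the raw optimizer position and the snapped integer configuration are stored under string keys, so either lookup short-circuits a re-benchmark (all values below are made up; illustrative only, not part of the dataset):

    cache = {}
    x = [0.43, 0.91]                    # raw position from the optimizer
    params = [32, 128]                  # after snap_to_nearest_config(x, tune_params)
    time_ms = 1.72                      # pretend benchmark result
    cache[",".join(str(i) for i in params)] = time_ms   # key "32,128"
    cache[",".join(str(i) for i in x)] = time_ms        # key "0.43,0.91"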
daler/metaseq | metaseq/results_table.py | DifferentialExpressionResults.enriched | def enriched(self, thresh=0.05, idx=True):
"""
Enriched features.
{threshdoc}
"""
        return self.upregulated(thresh=thresh, idx=idx) | python | [
"def",
"enriched",
"(",
"self",
",",
"thresh",
"=",
"0.05",
",",
"idx",
"=",
"True",
")",
":",
"return",
"self",
".",
"upregulated",
"(",
"thresh",
"=",
"thresh",
",",
"idx",
"=",
"idx",
")"
] | Enriched features.
{threshdoc} | [
"Enriched",
"features",
"."
] | train | https://github.com/daler/metaseq/blob/fa875d1f72317aa7ef95cb128b739956b16eef9f/metaseq/results_table.py#L814-L820 | 0.011765 |
frankban/django-endless-pagination | endless_pagination/views.py | AjaxMultipleObjectTemplateResponseMixin.get_template_names | def get_template_names(self):
"""Switch the templates for Ajax requests."""
request = self.request
querystring_key = request.REQUEST.get('querystring_key', PAGE_LABEL)
if request.is_ajax() and querystring_key == self.key:
return [self.page_template]
return super(
            AjaxMultipleObjectTemplateResponseMixin, self).get_template_names() | python | [
"def",
"get_template_names",
"(",
"self",
")",
":",
"request",
"=",
"self",
".",
"request",
"querystring_key",
"=",
"request",
".",
"REQUEST",
".",
"get",
"(",
"'querystring_key'",
",",
"PAGE_LABEL",
")",
"if",
"request",
".",
"is_ajax",
"(",
")",
"and",
"querystring_key",
"==",
"self",
".",
"key",
":",
"return",
"[",
"self",
".",
"page_template",
"]",
"return",
"super",
"(",
"AjaxMultipleObjectTemplateResponseMixin",
",",
"self",
")",
".",
"get_template_names",
"(",
")"
] | Switch the templates for Ajax requests. | [
"Switch",
"the",
"templates",
"for",
"Ajax",
"requests",
"."
] | train | https://github.com/frankban/django-endless-pagination/blob/4814fe7cf81277efe35e96b88f57cc260a771255/endless_pagination/views.py#L131-L138 | 0.005063 |
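A hedged usage sketch for the mixin above, assuming the AjaxListView bundled with django-endless-pagination; the model and template names are invented:

    from endless_pagination.views import AjaxListView
    from myapp.models import Entry              # hypothetical model

    class EntryListView(AjaxListView):
        model = Entry
        template_name = 'entry_list.html'       # full page for normal requests
        page_template = 'entry_list_page.html'  # fragment served when the Ajax
                                                # querystring_key matches self.key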
Esri/ArcREST | src/arcrest/manageags/_security.py | Security.addRole | def addRole(self, name, description=""):
""" Adds a role to the role store. This operation is available only
when the role store is a read-write store such as the default
ArcGIS Server store.
If the name of the role exists in the role store, an error will
be returned.
Input:
rolename - The name of the role. The name must be unique in the
role store.
description - An optional field to add comments or a
description for the role.
Output:
JSON message as dictionary
"""
params = {
"f" : "json",
"rolename" : name,
"description" : description
}
aURL = self._url + "/roles/add"
return self._post(url=aURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
                          proxy_port=self._proxy_port) | python | [
"def",
"addRole",
"(",
"self",
",",
"name",
",",
"description",
"=",
"\"\"",
")",
":",
"params",
"=",
"{",
"\"f\"",
":",
"\"json\"",
",",
"\"rolename\"",
":",
"name",
",",
"\"description\"",
":",
"description",
"}",
"aURL",
"=",
"self",
".",
"_url",
"+",
"\"/roles/add\"",
"return",
"self",
".",
"_post",
"(",
"url",
"=",
"aURL",
",",
"param_dict",
"=",
"params",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
")"
] | Adds a role to the role store. This operation is available only
when the role store is a read-write store such as the default
ArcGIS Server store.
If the name of the role exists in the role store, an error will
be returned.
Input:
rolename - The name of the role. The name must be unique in the
role store.
description - An optional field to add comments or a
description for the role.
Output:
JSON message as dictionary | [
"Adds",
"a",
"role",
"to",
"the",
"role",
"store",
".",
"This",
"operation",
"is",
"available",
"only",
"when",
"the",
"role",
"store",
"is",
"a",
"read",
"-",
"write",
"store",
"such",
"as",
"the",
"default",
"ArcGIS",
"Server",
"store",
".",
"If",
"the",
"name",
"of",
"the",
"role",
"exists",
"in",
"the",
"role",
"store",
"an",
"error",
"will",
"be",
"returned",
".",
"Input",
":",
"rolename",
"-",
"The",
"name",
"of",
"the",
"role",
".",
"The",
"name",
"must",
"be",
"unique",
"in",
"the",
"role",
"store",
".",
"description",
"-",
"An",
"optional",
"field",
"to",
"add",
"comments",
"or",
"a",
"description",
"for",
"the",
"role",
".",
"Output",
":",
"JSON",
"message",
"as",
"dictionary"
] | train | https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageags/_security.py#L102-L125 | 0.007576 |
Alignak-monitoring/alignak | alignak/scheduler.py | Scheduler.get_new_actions | def get_new_actions(self):
"""Call 'get_new_actions' hook point
Iter over all hosts and services to add new actions in internal lists
:return: None
"""
_t0 = time.time()
self.hook_point('get_new_actions')
statsmgr.timer('hook.get-new-actions', time.time() - _t0)
# ask for service and hosts their next check
for elt in self.all_my_hosts_and_services():
for action in elt.actions:
logger.debug("Got a new action for %s: %s", elt, action)
self.add(action)
# We take all, we can clear it
            elt.actions = [] | python | [
"def",
"get_new_actions",
"(",
"self",
")",
":",
"_t0",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"hook_point",
"(",
"'get_new_actions'",
")",
"statsmgr",
".",
"timer",
"(",
"'hook.get-new-actions'",
",",
"time",
".",
"time",
"(",
")",
"-",
"_t0",
")",
"# ask for service and hosts their next check",
"for",
"elt",
"in",
"self",
".",
"all_my_hosts_and_services",
"(",
")",
":",
"for",
"action",
"in",
"elt",
".",
"actions",
":",
"logger",
".",
"debug",
"(",
"\"Got a new action for %s: %s\"",
",",
"elt",
",",
"action",
")",
"self",
".",
"add",
"(",
"action",
")",
"# We take all, we can clear it",
"elt",
".",
"actions",
"=",
"[",
"]"
] | Call 'get_new_actions' hook point
Iter over all hosts and services to add new actions in internal lists
:return: None | [
"Call",
"get_new_actions",
"hook",
"point",
"Iter",
"over",
"all",
"hosts",
"and",
"services",
"to",
"add",
"new",
"actions",
"in",
"internal",
"lists"
] | train | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L1805-L1820 | 0.003115 |
Workiva/furious | furious/processors.py | encode_exception | def encode_exception(exception):
"""Encode exception to a form that can be passed around and serialized.
This will grab the stack, then strip off the last two calls which are
encode_exception and the function that called it.
"""
import sys
return AsyncException(unicode(exception),
exception.args,
sys.exc_info(),
                          exception) | python | [
"def",
"encode_exception",
"(",
"exception",
")",
":",
"import",
"sys",
"return",
"AsyncException",
"(",
"unicode",
"(",
"exception",
")",
",",
"exception",
".",
"args",
",",
"sys",
".",
"exc_info",
"(",
")",
",",
"exception",
")"
] | Encode exception to a form that can be passed around and serialized.
This will grab the stack, then strip off the last two calls which are
encode_exception and the function that called it. | [
"Encode",
"exception",
"to",
"a",
"form",
"that",
"can",
"be",
"passed",
"around",
"and",
"serialized",
"."
] | train | https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/processors.py#L100-L110 | 0.002342 |
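A minimal usage sketch (illustrative only, not part of the dataset): encode_exception is meant to run inside an except block, while sys.exc_info() still refers to the caught exception:

    def risky_operation():
        raise ValueError("boom")          # stand-in for real work

    try:
        risky_operation()
    except Exception as e:
        async_exc = encode_exception(e)   # captures message, args, traceback, original
        # async_exc can now be serialized and surfaced on another worker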
hsolbrig/PyShEx | pyshex/shape_expressions_language/p5_5_shapes_and_triple_expressions.py | matchesCardinality | def matchesCardinality(cntxt: Context, T: RDFGraph, expr: Union[ShExJ.tripleExpr, ShExJ.tripleExprLabel],
c: DebugContext) -> bool:
""" Evaluate cardinality expression
expr has a cardinality of min and/or max not equal to 1, where a max of -1 is treated as unbounded, and
T can be partitioned into k subsets T1, T2,…Tk such that min ≤ k ≤ max and for each Tn,
matches(Tn, expr, m) by the remaining rules in this list.
"""
# TODO: Cardinality defaults into spec
min_ = expr.min if expr.min is not None else 1
max_ = expr.max if expr.max is not None else 1
cardinality_text = f"{{{min_},{'*' if max_ == -1 else max_}}}"
if c.debug and (min_ != 0 or len(T) != 0):
print(f"{cardinality_text} matching {len(T)} triples")
if min_ == 0 and len(T) == 0:
return True
if isinstance(expr, ShExJ.TripleConstraint):
if len(T) < min_:
if len(T) > 0:
_fail_triples(cntxt, T)
cntxt.fail_reason = f" {len(T)} triples less than {cardinality_text}"
else:
cntxt.fail_reason = f" No matching triples found for predicate {cntxt.n3_mapper.n3(expr.predicate)}"
return False
elif 0 <= max_ < len(T):
_fail_triples(cntxt, T)
cntxt.fail_reason = f" {len(T)} triples exceeds max {cardinality_text}"
return False
else:
return all(matchesTripleConstraint(cntxt, t, expr) for t in T)
else:
for partition in _partitions(T, min_, max_):
if all(matchesExpr(cntxt, part, expr) for part in partition):
return True
if min_ != 1 or max_ != 1:
_fail_triples(cntxt, T)
cntxt.fail_reason = f" {len(T)} triples cannot be partitioned into {cardinality_text} passing groups"
    return False | python | [
"def",
"matchesCardinality",
"(",
"cntxt",
":",
"Context",
",",
"T",
":",
"RDFGraph",
",",
"expr",
":",
"Union",
"[",
"ShExJ",
".",
"tripleExpr",
",",
"ShExJ",
".",
"tripleExprLabel",
"]",
",",
"c",
":",
"DebugContext",
")",
"->",
"bool",
":",
"# TODO: Cardinality defaults into spec",
"min_",
"=",
"expr",
".",
"min",
"if",
"expr",
".",
"min",
"is",
"not",
"None",
"else",
"1",
"max_",
"=",
"expr",
".",
"max",
"if",
"expr",
".",
"max",
"is",
"not",
"None",
"else",
"1",
"cardinality_text",
"=",
"f\"{{{min_},{'*' if max_ == -1 else max_}}}\"",
"if",
"c",
".",
"debug",
"and",
"(",
"min_",
"!=",
"0",
"or",
"len",
"(",
"T",
")",
"!=",
"0",
")",
":",
"print",
"(",
"f\"{cardinality_text} matching {len(T)} triples\"",
")",
"if",
"min_",
"==",
"0",
"and",
"len",
"(",
"T",
")",
"==",
"0",
":",
"return",
"True",
"if",
"isinstance",
"(",
"expr",
",",
"ShExJ",
".",
"TripleConstraint",
")",
":",
"if",
"len",
"(",
"T",
")",
"<",
"min_",
":",
"if",
"len",
"(",
"T",
")",
">",
"0",
":",
"_fail_triples",
"(",
"cntxt",
",",
"T",
")",
"cntxt",
".",
"fail_reason",
"=",
"f\" {len(T)} triples less than {cardinality_text}\"",
"else",
":",
"cntxt",
".",
"fail_reason",
"=",
"f\" No matching triples found for predicate {cntxt.n3_mapper.n3(expr.predicate)}\"",
"return",
"False",
"elif",
"0",
"<=",
"max_",
"<",
"len",
"(",
"T",
")",
":",
"_fail_triples",
"(",
"cntxt",
",",
"T",
")",
"cntxt",
".",
"fail_reason",
"=",
"f\" {len(T)} triples exceeds max {cardinality_text}\"",
"return",
"False",
"else",
":",
"return",
"all",
"(",
"matchesTripleConstraint",
"(",
"cntxt",
",",
"t",
",",
"expr",
")",
"for",
"t",
"in",
"T",
")",
"else",
":",
"for",
"partition",
"in",
"_partitions",
"(",
"T",
",",
"min_",
",",
"max_",
")",
":",
"if",
"all",
"(",
"matchesExpr",
"(",
"cntxt",
",",
"part",
",",
"expr",
")",
"for",
"part",
"in",
"partition",
")",
":",
"return",
"True",
"if",
"min_",
"!=",
"1",
"or",
"max_",
"!=",
"1",
":",
"_fail_triples",
"(",
"cntxt",
",",
"T",
")",
"cntxt",
".",
"fail_reason",
"=",
"f\" {len(T)} triples cannot be partitioned into {cardinality_text} passing groups\"",
"return",
"False"
] | Evaluate cardinality expression
expr has a cardinality of min and/or max not equal to 1, where a max of -1 is treated as unbounded, and
T can be partitioned into k subsets T1, T2,…Tk such that min ≤ k ≤ max and for each Tn,
matches(Tn, expr, m) by the remaining rules in this list. | [
"Evaluate",
"cardinality",
"expression"
] | train | https://github.com/hsolbrig/PyShEx/blob/9d659cc36e808afd66d4a6d60e8ea21cb12eb744/pyshex/shape_expressions_language/p5_5_shapes_and_triple_expressions.py#L196-L234 | 0.004264 |
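A simplified standalone sketch of the scalar bounds check implied above, with max = -1 treated as unbounded exactly as the docstring states (the helper name is invented; illustrative only):

    def within_cardinality(k, min_=1, max_=1):
        # max_ == -1 means unbounded
        return k >= min_ and (max_ == -1 or k <= max_)

    assert within_cardinality(0, 0, -1)       # {0,*} matches the empty set
    assert not within_cardinality(3, 1, 2)    # exceeds max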
IrvKalb/pygwidgets | pygwidgets/pygwidgets.py | ImageCollection.replace | def replace(self, key):
"""Selects a different image to be shown.
Parameters:
| key - a key in the original dictionary to specify which image to show
"""
if not (key in self.imagesDict):
print('The key', key, 'was not found in the collection of images dictionary')
raise KeyError
self.originalImage = self.imagesDict[key]
self.image = self.originalImage.copy()
# Set the rect of the image to appropriate values - using the current image
# then scale and rotate
self.rect = self.image.get_rect()
self.rect.x = self.loc[0]
self.rect.y = self.loc[1]
self.scale(self.percent, self.scaleFromCenter)
        self.rotate(self.angle) | python | [
"def",
"replace",
"(",
"self",
",",
"key",
")",
":",
"if",
"not",
"(",
"key",
"in",
"self",
".",
"imagesDict",
")",
":",
"print",
"(",
"'The key'",
",",
"key",
",",
"'was not found in the collection of images dictionary'",
")",
"raise",
"KeyError",
"self",
".",
"originalImage",
"=",
"self",
".",
"imagesDict",
"[",
"key",
"]",
"self",
".",
"image",
"=",
"self",
".",
"originalImage",
".",
"copy",
"(",
")",
"# Set the rect of the image to appropriate values - using the current image\r",
"# then scale and rotate\r",
"self",
".",
"rect",
"=",
"self",
".",
"image",
".",
"get_rect",
"(",
")",
"self",
".",
"rect",
".",
"x",
"=",
"self",
".",
"loc",
"[",
"0",
"]",
"self",
".",
"rect",
".",
"y",
"=",
"self",
".",
"loc",
"[",
"1",
"]",
"self",
".",
"scale",
"(",
"self",
".",
"percent",
",",
"self",
".",
"scaleFromCenter",
")",
"self",
".",
"rotate",
"(",
"self",
".",
"angle",
")"
] | Selects a different image to be shown.
Parameters:
| key - a key in the original dictionary to specify which image to show | [
"Selects",
"a",
"different",
"image",
"to",
"be",
"shown",
".",
"Parameters",
":",
"|",
"key",
"-",
"a",
"key",
"in",
"the",
"original",
"dictionary",
"to",
"specify",
"which",
"image",
"to",
"show"
] | train | https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L2381-L2401 | 0.006402 |
phalt/swapi-python | swapi/models.py | BaseQuerySet.order_by | def order_by(self, order_attribute):
''' Return the list of items in a certain order '''
to_return = []
for f in sorted(self.items, key=lambda i: getattr(i, order_attribute)):
to_return.append(f)
        return to_return | python | [
"def",
"order_by",
"(",
"self",
",",
"order_attribute",
")",
":",
"to_return",
"=",
"[",
"]",
"for",
"f",
"in",
"sorted",
"(",
"self",
".",
"items",
",",
"key",
"=",
"lambda",
"i",
":",
"getattr",
"(",
"i",
",",
"order_attribute",
")",
")",
":",
"to_return",
".",
"append",
"(",
"f",
")",
"return",
"to_return"
] | Return the list of items in a certain order | [
"Return",
"the",
"list",
"of",
"items",
"in",
"a",
"certain",
"order"
] | train | https://github.com/phalt/swapi-python/blob/cb9195fc498a1d1fc3b1998d485edc94b8408ca7/swapi/models.py#L24-L29 | 0.007813 |
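A hedged usage sketch, assuming the package-level get_all helper of swapi-python and a live API; the attribute name follows the SWAPI schema:

    import swapi
    people = swapi.get_all("people")          # returns a queryset-like object
    for person in people.order_by("name"):
        print(person.name)                    # items sorted by that attribute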
dropbox/pygerduty | pygerduty/__init__.py | PagerDuty.trigger_incident | def trigger_incident(self, service_key, description,
incident_key=None, details=None,
client=None, client_url=None, contexts=None):
""" Report a new or ongoing problem. When PagerDuty receives a trigger,
it will either open a new incident, or add a new log entry to an
existing incident.
"""
return self.create_event(service_key, description, "trigger",
details, incident_key,
                                 client=client, client_url=client_url, contexts=contexts) | python | [
"def",
"trigger_incident",
"(",
"self",
",",
"service_key",
",",
"description",
",",
"incident_key",
"=",
"None",
",",
"details",
"=",
"None",
",",
"client",
"=",
"None",
",",
"client_url",
"=",
"None",
",",
"contexts",
"=",
"None",
")",
":",
"return",
"self",
".",
"create_event",
"(",
"service_key",
",",
"description",
",",
"\"trigger\"",
",",
"details",
",",
"incident_key",
",",
"client",
"=",
"client",
",",
"client_url",
"=",
"client_url",
",",
"contexts",
"=",
"contexts",
")"
] | Report a new or ongoing problem. When PagerDuty receives a trigger,
it will either open a new incident, or add a new log entry to an
existing incident. | [
"Report",
"a",
"new",
"or",
"ongoing",
"problem",
".",
"When",
"PagerDuty",
"receives",
"a",
"trigger",
"it",
"will",
"either",
"open",
"a",
"new",
"incident",
"or",
"add",
"a",
"new",
"log",
"entry",
"to",
"an",
"existing",
"incident",
"."
] | train | https://github.com/dropbox/pygerduty/blob/11b28bfb66306aa7fc2b95ab9df65eb97ea831cf/pygerduty/__init__.py#L583-L593 | 0.008475 |
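A hedged usage sketch; the client constructor arguments are assumptions, and the keyword names follow the signature above. Re-using incident_key folds repeat triggers into one incident:

    import pygerduty
    pager = pygerduty.PagerDuty("acme", api_token="SECRET")   # assumed constructor
    pager.trigger_incident(
        service_key="SERVICE_INTEGRATION_KEY",
        description="Disk usage above 95% on db-1",
        incident_key="db-1/disk",
    )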
kytos/python-openflow | pyof/v0x01/common/flow_match.py | Match.fill_wildcards | def fill_wildcards(self, field=None, value=0):
"""Update wildcards attribute.
This method update a wildcards considering the attributes of the
current instance.
Args:
field (str): Name of the updated field.
value (GenericType): New value used in the field.
"""
if field in [None, 'wildcards'] or isinstance(value, Pad):
return
default_value = getattr(Match, field)
if isinstance(default_value, IPAddress):
if field == 'nw_dst':
shift = FlowWildCards.OFPFW_NW_DST_SHIFT
base_mask = FlowWildCards.OFPFW_NW_DST_MASK
else:
shift = FlowWildCards.OFPFW_NW_SRC_SHIFT
base_mask = FlowWildCards.OFPFW_NW_SRC_MASK
# First we clear the nw_dst/nw_src mask related bits on the current
# wildcard by setting 0 on all of them while we keep all other bits
# as they are.
self.wildcards &= FlowWildCards.OFPFW_ALL ^ base_mask
# nw_dst and nw_src wildcard fields have 6 bits each.
# "base_mask" is the 'all ones' for those 6 bits.
# Once we know the netmask, we can calculate the these 6 bits
# wildcard value and reverse them in order to insert them at the
# correct position in self.wildcards
wildcard = (value.max_prefix - value.netmask) << shift
self.wildcards |= wildcard
else:
wildcard_field = "OFPFW_{}".format(field.upper())
wildcard = getattr(FlowWildCards, wildcard_field)
if value == default_value and not (self.wildcards & wildcard) or \
value != default_value and (self.wildcards & wildcard):
                self.wildcards ^= wildcard | python | [
"def",
"fill_wildcards",
"(",
"self",
",",
"field",
"=",
"None",
",",
"value",
"=",
"0",
")",
":",
"if",
"field",
"in",
"[",
"None",
",",
"'wildcards'",
"]",
"or",
"isinstance",
"(",
"value",
",",
"Pad",
")",
":",
"return",
"default_value",
"=",
"getattr",
"(",
"Match",
",",
"field",
")",
"if",
"isinstance",
"(",
"default_value",
",",
"IPAddress",
")",
":",
"if",
"field",
"==",
"'nw_dst'",
":",
"shift",
"=",
"FlowWildCards",
".",
"OFPFW_NW_DST_SHIFT",
"base_mask",
"=",
"FlowWildCards",
".",
"OFPFW_NW_DST_MASK",
"else",
":",
"shift",
"=",
"FlowWildCards",
".",
"OFPFW_NW_SRC_SHIFT",
"base_mask",
"=",
"FlowWildCards",
".",
"OFPFW_NW_SRC_MASK",
"# First we clear the nw_dst/nw_src mask related bits on the current",
"# wildcard by setting 0 on all of them while we keep all other bits",
"# as they are.",
"self",
".",
"wildcards",
"&=",
"FlowWildCards",
".",
"OFPFW_ALL",
"^",
"base_mask",
"# nw_dst and nw_src wildcard fields have 6 bits each.",
"# \"base_mask\" is the 'all ones' for those 6 bits.",
"# Once we know the netmask, we can calculate the these 6 bits",
"# wildcard value and reverse them in order to insert them at the",
"# correct position in self.wildcards",
"wildcard",
"=",
"(",
"value",
".",
"max_prefix",
"-",
"value",
".",
"netmask",
")",
"<<",
"shift",
"self",
".",
"wildcards",
"|=",
"wildcard",
"else",
":",
"wildcard_field",
"=",
"\"OFPFW_{}\"",
".",
"format",
"(",
"field",
".",
"upper",
"(",
")",
")",
"wildcard",
"=",
"getattr",
"(",
"FlowWildCards",
",",
"wildcard_field",
")",
"if",
"value",
"==",
"default_value",
"and",
"not",
"(",
"self",
".",
"wildcards",
"&",
"wildcard",
")",
"or",
"value",
"!=",
"default_value",
"and",
"(",
"self",
".",
"wildcards",
"&",
"wildcard",
")",
":",
"self",
".",
"wildcards",
"^=",
"wildcard"
] | Update wildcards attribute.
This method update a wildcards considering the attributes of the
current instance.
Args:
field (str): Name of the updated field.
value (GenericType): New value used in the field. | [
"Update",
"wildcards",
"attribute",
"."
] | train | https://github.com/kytos/python-openflow/blob/4f2d0d08ab28e102ed88fe57a4ee17729f1e1bb7/pyof/v0x01/common/flow_match.py#L163-L203 | 0.001102 |
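The nw_src/nw_dst branch above encodes a CIDR prefix as a 6-bit "bits wildcarded" count; a worked example of that shift arithmetic (the shift constant is an assumption taken from the OpenFlow 1.0 spec; illustrative only):

    max_prefix = 32                    # IPv4
    netmask = 24                       # a /24 destination
    OFPFW_NW_DST_SHIFT = 14            # assumed OF1.0 value
    wildcard = (max_prefix - netmask) << OFPFW_NW_DST_SHIFT
    assert wildcard == 8 << 14 == 0x20000   # 8 low address bits wildcarded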
marrow/mongo | marrow/mongo/core/trait/queryable.py | Queryable.reload | def reload(self, *fields, **kw):
"""Reload the entire document from the database, or refresh specific named top-level fields."""
Doc, collection, query, options = self._prepare_find(id=self.id, projection=fields, **kw)
result = collection.find_one(query, **options)
if fields: # Refresh only the requested data.
for k in result: # TODO: Better merge algorithm.
if k == ~Doc.id: continue
self.__data__[k] = result[k]
else:
self.__data__ = result
        return self | python | [
"def",
"reload",
"(",
"self",
",",
"*",
"fields",
",",
"*",
"*",
"kw",
")",
":",
"Doc",
",",
"collection",
",",
"query",
",",
"options",
"=",
"self",
".",
"_prepare_find",
"(",
"id",
"=",
"self",
".",
"id",
",",
"projection",
"=",
"fields",
",",
"*",
"*",
"kw",
")",
"result",
"=",
"collection",
".",
"find_one",
"(",
"query",
",",
"*",
"*",
"options",
")",
"if",
"fields",
":",
"# Refresh only the requested data.",
"for",
"k",
"in",
"result",
":",
"# TODO: Better merge algorithm.",
"if",
"k",
"==",
"~",
"Doc",
".",
"id",
":",
"continue",
"self",
".",
"__data__",
"[",
"k",
"]",
"=",
"result",
"[",
"k",
"]",
"else",
":",
"self",
".",
"__data__",
"=",
"result",
"return",
"self"
] | Reload the entire document from the database, or refresh specific named top-level fields. | [
"Reload",
"the",
"entire",
"document",
"from",
"the",
"database",
"or",
"refresh",
"specific",
"named",
"top",
"-",
"level",
"fields",
"."
] | train | https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/trait/queryable.py#L271-L284 | 0.042596 |
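A hedged usage sketch; Person stands in for any Document subclass using this trait, and the query helper is hypothetical:

    person = Person.find_one(Person.email == "a@example.com")  # hypothetical query
    person.reload()                   # refresh every field from MongoDB
    person.reload('name', 'email')    # refresh only those top-level fields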
rliebz/whoswho | whoswho/model.py | Name.ratio_deep_compare | def ratio_deep_compare(self, other, settings):
"""
Compares each field of the name one at a time to see if they match.
Each name field has context-specific comparison logic.
:param Name other: other Name for comparison
:return int: sequence ratio match (out of 100)
"""
if not self._is_compatible_with(other):
return 0
first, middle, last = self._compare_components(other, settings, True)
f_weight, m_weight, l_weight = self._determine_weights(other, settings)
total_weight = f_weight + m_weight + l_weight
result = (
first * f_weight +
middle * m_weight +
last * l_weight
) / total_weight
        return result | python | [
"def",
"ratio_deep_compare",
"(",
"self",
",",
"other",
",",
"settings",
")",
":",
"if",
"not",
"self",
".",
"_is_compatible_with",
"(",
"other",
")",
":",
"return",
"0",
"first",
",",
"middle",
",",
"last",
"=",
"self",
".",
"_compare_components",
"(",
"other",
",",
"settings",
",",
"True",
")",
"f_weight",
",",
"m_weight",
",",
"l_weight",
"=",
"self",
".",
"_determine_weights",
"(",
"other",
",",
"settings",
")",
"total_weight",
"=",
"f_weight",
"+",
"m_weight",
"+",
"l_weight",
"result",
"=",
"(",
"first",
"*",
"f_weight",
"+",
"middle",
"*",
"m_weight",
"+",
"last",
"*",
"l_weight",
")",
"/",
"total_weight",
"return",
"result"
] | Compares each field of the name one at a time to see if they match.
Each name field has context-specific comparison logic.
:param Name other: other Name for comparison
:return int: sequence ratio match (out of 100) | [
"Compares",
"each",
"field",
"of",
"the",
"name",
"one",
"at",
"a",
"time",
"to",
"see",
"if",
"they",
"match",
".",
"Each",
"name",
"field",
"has",
"context",
"-",
"specific",
"comparison",
"logic",
"."
] | train | https://github.com/rliebz/whoswho/blob/0c411e418c240fcec6ea0a23d15bd003056c65d0/whoswho/model.py#L41-L63 | 0.002632 |
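The final score above is a weighted mean of the three per-component ratios; the same formula with sample numbers (weights are illustrative, the real ones come from _determine_weights):

    first, middle, last = 100, 60, 100     # per-component match ratios
    f_w, m_w, l_w = 20, 10, 20             # illustrative weights
    result = (first * f_w + middle * m_w + last * l_w) / (f_w + m_w + l_w)
    assert result == 92.0                  # a weak middle match only costs its weight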
bcbio/bcbio-nextgen | bcbio/rnaseq/cufflinks.py | strand_unknown | def strand_unknown(db, transcript):
"""
for unstranded data with novel transcripts single exon genes
will have no strand information. single exon novel genes are also
a source of noise in the Cufflinks assembly so this removes them
"""
features = list(db.children(transcript))
strand = features[0].strand
if strand == ".":
return True
else:
        return False | python | [
"def",
"strand_unknown",
"(",
"db",
",",
"transcript",
")",
":",
"features",
"=",
"list",
"(",
"db",
".",
"children",
"(",
"transcript",
")",
")",
"strand",
"=",
"features",
"[",
"0",
"]",
".",
"strand",
"if",
"strand",
"==",
"\".\"",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | for unstranded data with novel transcripts single exon genes
will have no strand information. single exon novel genes are also
a source of noise in the Cufflinks assembly so this removes them | [
"for",
"unstranded",
"data",
"with",
"novel",
"transcripts",
"single",
"exon",
"genes",
"will",
"have",
"no",
"strand",
"information",
".",
"single",
"exon",
"novel",
"genes",
"are",
"also",
"a",
"source",
"of",
"noise",
"in",
"the",
"Cufflinks",
"assembly",
"so",
"this",
"removes",
"them"
] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/cufflinks.py#L156-L167 | 0.002469 |
ThreatConnect-Inc/tcex | tcex/tcex_bin_profile.py | TcExProfile.profile_update | def profile_update(self, profile):
"""Update an existing profile with new parameters or remove deprecated parameters.
Args:
profile (dict): The dictionary containting the profile settings.
"""
# warn about missing install_json parameter
if profile.get('install_json') is None:
print(
'{}{}Missing install_json parameter for profile {}.'.format(
c.Style.BRIGHT, c.Fore.YELLOW, profile.get('profile_name')
)
)
# update args section to v2 schema
self.profile_update_args_v2(profile)
# update args section to v3 schema
self.profile_update_args_v3(profile)
# remove legacy script field
        self.profile_update_schema(profile) | python | [
"def",
"profile_update",
"(",
"self",
",",
"profile",
")",
":",
"# warn about missing install_json parameter",
"if",
"profile",
".",
"get",
"(",
"'install_json'",
")",
"is",
"None",
":",
"print",
"(",
"'{}{}Missing install_json parameter for profile {}.'",
".",
"format",
"(",
"c",
".",
"Style",
".",
"BRIGHT",
",",
"c",
".",
"Fore",
".",
"YELLOW",
",",
"profile",
".",
"get",
"(",
"'profile_name'",
")",
")",
")",
"# update args section to v2 schema",
"self",
".",
"profile_update_args_v2",
"(",
"profile",
")",
"# update args section to v3 schema",
"self",
".",
"profile_update_args_v3",
"(",
"profile",
")",
"# remove legacy script field",
"self",
".",
"profile_update_schema",
"(",
"profile",
")"
] | Update an existing profile with new parameters or remove deprecated parameters.
Args:
profile (dict): The dictionary containting the profile settings. | [
"Update",
"an",
"existing",
"profile",
"with",
"new",
"parameters",
"or",
"remove",
"deprecated",
"parameters",
"."
] | train | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_profile.py#L510-L531 | 0.003769 |
polysquare/polysquare-setuptools-lint | polysquare_setuptools_lint/__init__.py | _custom_argv | def _custom_argv(argv):
"""Overwrite argv[1:] with argv, restore on exit."""
backup_argv = sys.argv
sys.argv = backup_argv[:1] + argv
try:
yield
finally:
        sys.argv = backup_argv | python | [
"def",
"_custom_argv",
"(",
"argv",
")",
":",
"backup_argv",
"=",
"sys",
".",
"argv",
"sys",
".",
"argv",
"=",
"backup_argv",
"[",
":",
"1",
"]",
"+",
"argv",
"try",
":",
"yield",
"finally",
":",
"sys",
".",
"argv",
"=",
"backup_argv"
] | Overwrite argv[1:] with argv, restore on exit. | [
"Overwrite",
"argv",
"[",
"1",
":",
"]",
"with",
"argv",
"restore",
"on",
"exit",
"."
] | train | https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L42-L49 | 0.004717 |
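A minimal usage sketch of the context manager above (the flag and entry point are made up; illustrative only):

    import sys

    def tool_main():
        print(sys.argv)                    # hypothetical entry point

    with _custom_argv(['--some-flag', 'value']):
        tool_main()                        # sees argv[0] plus the custom args
    # sys.argv is restored here even if tool_main() raised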
PGower/PyCanvas | pycanvas/apis/authentications_log.py | AuthenticationsLogAPI.query_by_login | def query_by_login(self, login_id, end_time=None, start_time=None):
"""
Query by login.
List authentication events for a given login.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - login_id
"""ID"""
path["login_id"] = login_id
# OPTIONAL - start_time
"""The beginning of the time range from which you want events."""
if start_time is not None:
params["start_time"] = start_time
# OPTIONAL - end_time
"""The end of the time range from which you want events."""
if end_time is not None:
params["end_time"] = end_time
self.logger.debug("GET /api/v1/audit/authentication/logins/{login_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("GET", "/api/v1/audit/authentication/logins/{login_id}".format(**path), data=data, params=params, no_data=True) | python | [
"def",
"query_by_login",
"(",
"self",
",",
"login_id",
",",
"end_time",
"=",
"None",
",",
"start_time",
"=",
"None",
")",
":",
"path",
"=",
"{",
"}",
"data",
"=",
"{",
"}",
"params",
"=",
"{",
"}",
"# REQUIRED - PATH - login_id\r",
"\"\"\"ID\"\"\"",
"path",
"[",
"\"login_id\"",
"]",
"=",
"login_id",
"# OPTIONAL - start_time\r",
"\"\"\"The beginning of the time range from which you want events.\"\"\"",
"if",
"start_time",
"is",
"not",
"None",
":",
"params",
"[",
"\"start_time\"",
"]",
"=",
"start_time",
"# OPTIONAL - end_time\r",
"\"\"\"The end of the time range from which you want events.\"\"\"",
"if",
"end_time",
"is",
"not",
"None",
":",
"params",
"[",
"\"end_time\"",
"]",
"=",
"end_time",
"self",
".",
"logger",
".",
"debug",
"(",
"\"GET /api/v1/audit/authentication/logins/{login_id} with query params: {params} and form data: {data}\"",
".",
"format",
"(",
"params",
"=",
"params",
",",
"data",
"=",
"data",
",",
"*",
"*",
"path",
")",
")",
"return",
"self",
".",
"generic_request",
"(",
"\"GET\"",
",",
"\"/api/v1/audit/authentication/logins/{login_id}\"",
".",
"format",
"(",
"*",
"*",
"path",
")",
",",
"data",
"=",
"data",
",",
"params",
"=",
"params",
",",
"no_data",
"=",
"True",
")"
] | Query by login.
List authentication events for a given login. | [
"Query",
"by",
"login",
".",
"List",
"authentication",
"events",
"for",
"a",
"given",
"login",
"."
] | train | https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/authentications_log.py#L19-L44 | 0.003906 |
pycontribs/pyrax | pyrax/clouddns.py | CloudDNSManager._create_body | def _create_body(self, name, emailAddress, ttl=3600, comment=None,
subdomains=None, records=None):
"""
Creates the appropriate dict for creating a new domain.
"""
if subdomains is None:
subdomains = []
if records is None:
records = []
body = {"domains": [{
"name": name,
"emailAddress": emailAddress,
"ttl": ttl,
"comment": comment,
"subdomains": {
"domains": subdomains
},
"recordsList": {
"records": records
},
}]}
        return body | python | [
"def",
"_create_body",
"(",
"self",
",",
"name",
",",
"emailAddress",
",",
"ttl",
"=",
"3600",
",",
"comment",
"=",
"None",
",",
"subdomains",
"=",
"None",
",",
"records",
"=",
"None",
")",
":",
"if",
"subdomains",
"is",
"None",
":",
"subdomains",
"=",
"[",
"]",
"if",
"records",
"is",
"None",
":",
"records",
"=",
"[",
"]",
"body",
"=",
"{",
"\"domains\"",
":",
"[",
"{",
"\"name\"",
":",
"name",
",",
"\"emailAddress\"",
":",
"emailAddress",
",",
"\"ttl\"",
":",
"ttl",
",",
"\"comment\"",
":",
"comment",
",",
"\"subdomains\"",
":",
"{",
"\"domains\"",
":",
"subdomains",
"}",
",",
"\"recordsList\"",
":",
"{",
"\"records\"",
":",
"records",
"}",
",",
"}",
"]",
"}",
"return",
"body"
] | Creates the appropriate dict for creating a new domain. | [
"Creates",
"the",
"appropriate",
"dict",
"for",
"creating",
"a",
"new",
"domain",
"."
] | train | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/clouddns.py#L301-L322 | 0.004213 |
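The body produced by the defaults above, shown for a call with only the required arguments (shape restated from the code; domain values invented):

    body = {"domains": [{
        "name": "example.com",
        "emailAddress": "admin@example.com",
        "ttl": 3600,
        "comment": None,
        "subdomains": {"domains": []},
        "recordsList": {"records": []},
    }]}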
saltstack/salt | salt/modules/launchctl_service.py | status | def status(name, runas=None):
'''
Return the status for a service via systemd.
If the name contains globbing, a dict mapping service name to True/False
values is returned.
.. versionchanged:: 2018.3.0
The service name can now be a glob (e.g. ``salt*``)
Args:
name (str): The name of the service to check
runas (str): User to run launchctl commands
Returns:
bool: True if running, False otherwise
dict: Maps service name to True if running, False otherwise
CLI Example:
.. code-block:: bash
salt '*' service.status <service name>
'''
contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name))
if contains_globbing:
services = fnmatch.filter(get_all(), name)
else:
services = [name]
results = {}
for service in services:
service_info = _service_by_name(service)
lookup_name = service_info['plist']['Label'] if service_info else service
launchctl_data = _get_launchctl_data(lookup_name, runas=runas)
if launchctl_data:
if BEFORE_YOSEMITE:
if six.PY3:
results[service] = 'PID' in plistlib.loads(launchctl_data)
else:
results[service] = 'PID' in dict(plistlib.readPlistFromString(launchctl_data))
else:
pattern = '"PID" = [0-9]+;'
results[service] = True if re.search(pattern, launchctl_data) else False
else:
results[service] = False
if contains_globbing:
return results
    return results[name] | python | [
"def",
"status",
"(",
"name",
",",
"runas",
"=",
"None",
")",
":",
"contains_globbing",
"=",
"bool",
"(",
"re",
".",
"search",
"(",
"r'\\*|\\?|\\[.+\\]'",
",",
"name",
")",
")",
"if",
"contains_globbing",
":",
"services",
"=",
"fnmatch",
".",
"filter",
"(",
"get_all",
"(",
")",
",",
"name",
")",
"else",
":",
"services",
"=",
"[",
"name",
"]",
"results",
"=",
"{",
"}",
"for",
"service",
"in",
"services",
":",
"service_info",
"=",
"_service_by_name",
"(",
"service",
")",
"lookup_name",
"=",
"service_info",
"[",
"'plist'",
"]",
"[",
"'Label'",
"]",
"if",
"service_info",
"else",
"service",
"launchctl_data",
"=",
"_get_launchctl_data",
"(",
"lookup_name",
",",
"runas",
"=",
"runas",
")",
"if",
"launchctl_data",
":",
"if",
"BEFORE_YOSEMITE",
":",
"if",
"six",
".",
"PY3",
":",
"results",
"[",
"service",
"]",
"=",
"'PID'",
"in",
"plistlib",
".",
"loads",
"(",
"launchctl_data",
")",
"else",
":",
"results",
"[",
"service",
"]",
"=",
"'PID'",
"in",
"dict",
"(",
"plistlib",
".",
"readPlistFromString",
"(",
"launchctl_data",
")",
")",
"else",
":",
"pattern",
"=",
"'\"PID\" = [0-9]+;'",
"results",
"[",
"service",
"]",
"=",
"True",
"if",
"re",
".",
"search",
"(",
"pattern",
",",
"launchctl_data",
")",
"else",
"False",
"else",
":",
"results",
"[",
"service",
"]",
"=",
"False",
"if",
"contains_globbing",
":",
"return",
"results",
"return",
"results",
"[",
"name",
"]"
] | Return the status for a service via systemd.
If the name contains globbing, a dict mapping service name to True/False
values is returned.
.. versionchanged:: 2018.3.0
The service name can now be a glob (e.g. ``salt*``)
Args:
name (str): The name of the service to check
runas (str): User to run launchctl commands
Returns:
bool: True if running, False otherwise
dict: Maps service name to True if running, False otherwise
CLI Example:
.. code-block:: bash
salt '*' service.status <service name> | [
"Return",
"the",
"status",
"for",
"a",
"service",
"via",
"systemd",
".",
"If",
"the",
"name",
"contains",
"globbing",
"a",
"dict",
"mapping",
"service",
"name",
"to",
"True",
"/",
"False",
"values",
"is",
"returned",
"."
] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/launchctl_service.py#L222-L271 | 0.002475 |
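A hedged sketch of the glob behavior described in the docstring above (service names invented; illustrative only):

    status('com.apple.mdworker')     # single name -> True or False
    status('com.apple.*')            # glob -> {'com.apple.mdworker': True, ...}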
rosenbrockc/fortpy | fortpy/scripts/analyze.py | FortpyShell.do_parse | def do_parse(self, arg, fullparse=False):
"""Parse the test results from the specified directory and load them under the name
of 'module.executable ' that they were created with. E.g. parse classes.polya/
"""
from os import path
fullpath = path.abspath(path.expanduser(arg))
if path.isdir(fullpath):
if fullpath[-1] == "/":
end = -2
else:
end = -1
case = fullpath.split("/")[end]
self.tests[case] = Analysis(fullpath, fullparse)
self.do_set(case)
else:
msg.err("The folder {} does not exist.".format(fullpath)) | python | def do_parse(self, arg, fullparse=False):
"""Parse the test results from the specified directory and load them under the name
of 'module.executable ' that they were created with. E.g. parse classes.polya/
"""
from os import path
fullpath = path.abspath(path.expanduser(arg))
if path.isdir(fullpath):
if fullpath[-1] == "/":
end = -2
else:
end = -1
case = fullpath.split("/")[end]
self.tests[case] = Analysis(fullpath, fullparse)
self.do_set(case)
else:
msg.err("The folder {} does not exist.".format(fullpath)) | [
"def",
"do_parse",
"(",
"self",
",",
"arg",
",",
"fullparse",
"=",
"False",
")",
":",
"from",
"os",
"import",
"path",
"fullpath",
"=",
"path",
".",
"abspath",
"(",
"path",
".",
"expanduser",
"(",
"arg",
")",
")",
"if",
"path",
".",
"isdir",
"(",
"fullpath",
")",
":",
"if",
"fullpath",
"[",
"-",
"1",
"]",
"==",
"\"/\"",
":",
"end",
"=",
"-",
"2",
"else",
":",
"end",
"=",
"-",
"1",
"case",
"=",
"fullpath",
".",
"split",
"(",
"\"/\"",
")",
"[",
"end",
"]",
"self",
".",
"tests",
"[",
"case",
"]",
"=",
"Analysis",
"(",
"fullpath",
",",
"fullparse",
")",
"self",
".",
"do_set",
"(",
"case",
")",
"else",
":",
"msg",
".",
"err",
"(",
"\"The folder {} does not exist.\"",
".",
"format",
"(",
"fullpath",
")",
")"
] | Parse the test results from the specified directory and load them under the name
of 'module.executable ' that they were created with. E.g. parse classes.polya/ | [
"Parse",
"the",
"test",
"results",
"from",
"the",
"specified",
"directory",
"and",
"load",
"them",
"under",
"the",
"name",
"of",
"module",
".",
"executable",
"that",
"they",
"were",
"created",
"with",
".",
"E",
".",
"g",
".",
"parse",
"classes",
".",
"polya",
"/"
] | train | https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/scripts/analyze.py#L1165-L1180 | 0.00597 |
mabuchilab/QNET | src/qnet/printing/sreprprinter.py | IndentedSReprPrinter._get_from_cache | def _get_from_cache(self, expr):
"""Obtain cached result, prepend with the keyname if necessary, and
indent for the current level"""
is_cached, res = super()._get_from_cache(expr)
if is_cached:
indent_str = " " * self._print_level
return True, indent(res, indent_str)
else:
            return False, None | python | [
"def",
"_get_from_cache",
"(",
"self",
",",
"expr",
")",
":",
"is_cached",
",",
"res",
"=",
"super",
"(",
")",
".",
"_get_from_cache",
"(",
"expr",
")",
"if",
"is_cached",
":",
"indent_str",
"=",
"\" \"",
"*",
"self",
".",
"_print_level",
"return",
"True",
",",
"indent",
"(",
"res",
",",
"indent_str",
")",
"else",
":",
"return",
"False",
",",
"None"
] | Obtain cached result, prepend with the keyname if necessary, and
indent for the current level | [
"Obtain",
"cached",
"result",
"prepend",
"with",
"the",
"keyname",
"if",
"necessary",
"and",
"indent",
"for",
"the",
"current",
"level"
] | train | https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/printing/sreprprinter.py#L59-L67 | 0.005376 |
KrishnaswamyLab/PHATE | Python/phate/phate.py | PHATE.von_neumann_entropy | def von_neumann_entropy(self, t_max=100):
"""Calculate Von Neumann Entropy
Determines the Von Neumann entropy of the diffusion affinities
at varying levels of `t`. The user should select a value of `t`
around the "knee" of the entropy curve.
We require that 'fit' stores the value of `PHATE.diff_op`
in order to calculate the Von Neumann entropy.
Parameters
----------
t_max : int, default: 100
Maximum value of `t` to test
Returns
-------
entropy : array, shape=[t_max]
The entropy of the diffusion affinities for each value of `t`
"""
t = np.arange(t_max)
        return t, vne.compute_von_neumann_entropy(self.diff_op, t_max=t_max) | python | [
"def",
"von_neumann_entropy",
"(",
"self",
",",
"t_max",
"=",
"100",
")",
":",
"t",
"=",
"np",
".",
"arange",
"(",
"t_max",
")",
"return",
"t",
",",
"vne",
".",
"compute_von_neumann_entropy",
"(",
"self",
".",
"diff_op",
",",
"t_max",
"=",
"t_max",
")"
] | Calculate Von Neumann Entropy
Determines the Von Neumann entropy of the diffusion affinities
at varying levels of `t`. The user should select a value of `t`
around the "knee" of the entropy curve.
We require that 'fit' stores the value of `PHATE.diff_op`
in order to calculate the Von Neumann entropy.
Parameters
----------
t_max : int, default: 100
Maximum value of `t` to test
Returns
-------
entropy : array, shape=[t_max]
The entropy of the diffusion affinities for each value of `t` | [
"Calculate",
"Von",
"Neumann",
"Entropy"
] | train | https://github.com/KrishnaswamyLab/PHATE/blob/346a4597dcfc523f8bef99bce482e677282b6719/Python/phate/phate.py#L865-L886 | 0.002584 |
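The docstring leaves picking t near the "knee" to the user; one common heuristic (an assumption here, not something PHATE itself provides) is the point of maximum curvature of the returned (t, entropy) curve:

import numpy as np

def knee_point(t, entropy):
    # Approximate curvature by the absolute second difference on the t grid.
    curvature = np.abs(np.diff(entropy, 2))
    return t[1 + np.argmax(curvature)]

t = np.arange(100)
entropy = 10.0 / (1.0 + np.exp(0.3 * (t - 20.0)))  # synthetic decaying curve
print(knee_point(t, entropy))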
anthill/koala | koala/reader.py | read_rels | def read_rels(archive):
"""Read relationships for a workbook"""
xml_source = archive.read(ARC_WORKBOOK_RELS)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}Relationship' % PKG_REL_NS):
rId = element.get('Id')
pth = element.get("Target")
typ = element.get('Type')
# normalise path
if pth.startswith("/xl"):
pth = pth.replace("/xl", "xl")
elif not pth.startswith("xl") and not pth.startswith(".."):
pth = "xl/" + pth
yield rId, {'path':pth, 'type':typ} | python | def read_rels(archive):
"""Read relationships for a workbook"""
xml_source = archive.read(ARC_WORKBOOK_RELS)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}Relationship' % PKG_REL_NS):
rId = element.get('Id')
pth = element.get("Target")
typ = element.get('Type')
# normalise path
if pth.startswith("/xl"):
pth = pth.replace("/xl", "xl")
elif not pth.startswith("xl") and not pth.startswith(".."):
pth = "xl/" + pth
yield rId, {'path':pth, 'type':typ} | [
"def",
"read_rels",
"(",
"archive",
")",
":",
"xml_source",
"=",
"archive",
".",
"read",
"(",
"ARC_WORKBOOK_RELS",
")",
"tree",
"=",
"fromstring",
"(",
"xml_source",
")",
"for",
"element",
"in",
"safe_iterator",
"(",
"tree",
",",
"'{%s}Relationship'",
"%",
"PKG_REL_NS",
")",
":",
"rId",
"=",
"element",
".",
"get",
"(",
"'Id'",
")",
"pth",
"=",
"element",
".",
"get",
"(",
"\"Target\"",
")",
"typ",
"=",
"element",
".",
"get",
"(",
"'Type'",
")",
"# normalise path",
"if",
"pth",
".",
"startswith",
"(",
"\"/xl\"",
")",
":",
"pth",
"=",
"pth",
".",
"replace",
"(",
"\"/xl\"",
",",
"\"xl\"",
")",
"elif",
"not",
"pth",
".",
"startswith",
"(",
"\"xl\"",
")",
"and",
"not",
"pth",
".",
"startswith",
"(",
"\"..\"",
")",
":",
"pth",
"=",
"\"xl/\"",
"+",
"pth",
"yield",
"rId",
",",
"{",
"'path'",
":",
"pth",
",",
"'type'",
":",
"typ",
"}"
] | Read relationships for a workbook | [
"Read",
"relationships",
"for",
"a",
"workbook"
] | train | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/reader.py#L231-L244 | 0.005272 |
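The archive parameter only needs a .read(name) method, so a zipfile.ZipFile opened on the .xlsx works directly. A hedged usage sketch; the file name is hypothetical and the import path is taken from this record:

from zipfile import ZipFile

from koala.reader import read_rels

with ZipFile("workbook.xlsx") as archive:
    for rid, rel in read_rels(archive):
        # rel is {'path': ..., 'type': ...} with the path already normalised.
        print(rid, rel["path"])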
numenta/htmresearch | htmresearch/algorithms/faulty_temporal_memory.py | FaultyTemporalMemory.burstColumn | def burstColumn(self, column, columnMatchingSegments, prevActiveCells,
prevWinnerCells, learn):
"""
Activates all of the cells in an unpredicted active column, chooses a winner
cell, and, if learning is turned on, learns on one segment, growing a new
segment if necessary.
@param column (int)
Index of bursting column.
@param columnMatchingSegments (iter)
Matching segments in this column, or None if there aren't any.
@param prevActiveCells (list)
Active cells in `t-1`.
@param prevWinnerCells (list)
Winner cells in `t-1`.
@param learn (bool)
Whether or not learning is enabled.
@return (tuple) Contains:
`cells` (iter),
`winnerCell` (int),
"""
start = self.cellsPerColumn * column
# Strip out destroyed cells before passing along to base _burstColumn()
cellsForColumn = [cellIdx
for cellIdx
in xrange(start, start + self.cellsPerColumn)
if cellIdx not in self.deadCells]
return self._burstColumn(
self.connections, self._random, self.lastUsedIterationForSegment, column,
columnMatchingSegments, prevActiveCells, prevWinnerCells, cellsForColumn,
self.numActivePotentialSynapsesForSegment, self.iteration,
self.maxNewSynapseCount, self.initialPermanence, self.permanenceIncrement,
self.permanenceDecrement, self.maxSegmentsPerCell,
self.maxSynapsesPerSegment, learn) | python | def burstColumn(self, column, columnMatchingSegments, prevActiveCells,
prevWinnerCells, learn):
"""
Activates all of the cells in an unpredicted active column, chooses a winner
cell, and, if learning is turned on, learns on one segment, growing a new
segment if necessary.
@param column (int)
Index of bursting column.
@param columnMatchingSegments (iter)
Matching segments in this column, or None if there aren't any.
@param prevActiveCells (list)
Active cells in `t-1`.
@param prevWinnerCells (list)
Winner cells in `t-1`.
@param learn (bool)
Whether or not learning is enabled.
@return (tuple) Contains:
`cells` (iter),
`winnerCell` (int),
"""
start = self.cellsPerColumn * column
# Strip out destroyed cells before passing along to base _burstColumn()
cellsForColumn = [cellIdx
for cellIdx
in xrange(start, start + self.cellsPerColumn)
if cellIdx not in self.deadCells]
return self._burstColumn(
self.connections, self._random, self.lastUsedIterationForSegment, column,
columnMatchingSegments, prevActiveCells, prevWinnerCells, cellsForColumn,
self.numActivePotentialSynapsesForSegment, self.iteration,
self.maxNewSynapseCount, self.initialPermanence, self.permanenceIncrement,
self.permanenceDecrement, self.maxSegmentsPerCell,
self.maxSynapsesPerSegment, learn) | [
"def",
"burstColumn",
"(",
"self",
",",
"column",
",",
"columnMatchingSegments",
",",
"prevActiveCells",
",",
"prevWinnerCells",
",",
"learn",
")",
":",
"start",
"=",
"self",
".",
"cellsPerColumn",
"*",
"column",
"# Strip out destroyed cells before passing along to base _burstColumn()",
"cellsForColumn",
"=",
"[",
"cellIdx",
"for",
"cellIdx",
"in",
"xrange",
"(",
"start",
",",
"start",
"+",
"self",
".",
"cellsPerColumn",
")",
"if",
"cellIdx",
"not",
"in",
"self",
".",
"deadCells",
"]",
"return",
"self",
".",
"_burstColumn",
"(",
"self",
".",
"connections",
",",
"self",
".",
"_random",
",",
"self",
".",
"lastUsedIterationForSegment",
",",
"column",
",",
"columnMatchingSegments",
",",
"prevActiveCells",
",",
"prevWinnerCells",
",",
"cellsForColumn",
",",
"self",
".",
"numActivePotentialSynapsesForSegment",
",",
"self",
".",
"iteration",
",",
"self",
".",
"maxNewSynapseCount",
",",
"self",
".",
"initialPermanence",
",",
"self",
".",
"permanenceIncrement",
",",
"self",
".",
"permanenceDecrement",
",",
"self",
".",
"maxSegmentsPerCell",
",",
"self",
".",
"maxSynapsesPerSegment",
",",
"learn",
")"
] | Activates all of the cells in an unpredicted active column, chooses a winner
cell, and, if learning is turned on, learns on one segment, growing a new
segment if necessary.
@param column (int)
Index of bursting column.
@param columnMatchingSegments (iter)
Matching segments in this column, or None if there aren't any.
@param prevActiveCells (list)
Active cells in `t-1`.
@param prevWinnerCells (list)
Winner cells in `t-1`.
@param learn (bool)
Whether or not learning is enabled.
@return (tuple) Contains:
`cells` (iter),
`winnerCell` (int), | [
"Activates",
"all",
"of",
"the",
"cells",
"in",
"an",
"unpredicted",
"active",
"column",
"chooses",
"a",
"winner",
"cell",
"and",
"if",
"learning",
"is",
"turned",
"on",
"learns",
"on",
"one",
"segment",
"growing",
"a",
"new",
"segment",
"if",
"necessary",
"."
] | train | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/faulty_temporal_memory.py#L81-L122 | 0.002613 |
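The only change over the base class here is filtering the column's cell range through the dead-cell set before delegating to _burstColumn. The filter in isolation, with made-up numbers:

cells_per_column = 32
dead_cells = {130, 131}

def live_cells_for_column(column):
    start = cells_per_column * column
    # Same comprehension as burstColumn: skip indices marked dead.
    return [idx for idx in range(start, start + cells_per_column)
            if idx not in dead_cells]

print(live_cells_for_column(4))  # 128..159 without 130 and 131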
angr/claripy | claripy/vsa/strided_interval.py | StridedInterval.SLT | def SLT(self, o):
"""
Signed less than
:param o: The other operand
:return: TrueResult(), FalseResult(), or MaybeResult()
"""
signed_bounds_1 = self._signed_bounds()
signed_bounds_2 = o._signed_bounds()
ret = [ ]
for lb_1, ub_1 in signed_bounds_1:
for lb_2, ub_2 in signed_bounds_2:
if ub_1 < lb_2:
ret.append(TrueResult())
elif lb_1 >= ub_2:
ret.append(FalseResult())
else:
ret.append(MaybeResult())
if all(r.identical(TrueResult()) for r in ret):
return TrueResult()
elif all(r.identical(FalseResult()) for r in ret):
return FalseResult()
else:
return MaybeResult() | python | def SLT(self, o):
"""
Signed less than
:param o: The other operand
:return: TrueResult(), FalseResult(), or MaybeResult()
"""
signed_bounds_1 = self._signed_bounds()
signed_bounds_2 = o._signed_bounds()
ret = [ ]
for lb_1, ub_1 in signed_bounds_1:
for lb_2, ub_2 in signed_bounds_2:
if ub_1 < lb_2:
ret.append(TrueResult())
elif lb_1 >= ub_2:
ret.append(FalseResult())
else:
ret.append(MaybeResult())
if all(r.identical(TrueResult()) for r in ret):
return TrueResult()
elif all(r.identical(FalseResult()) for r in ret):
return FalseResult()
else:
return MaybeResult() | [
"def",
"SLT",
"(",
"self",
",",
"o",
")",
":",
"signed_bounds_1",
"=",
"self",
".",
"_signed_bounds",
"(",
")",
"signed_bounds_2",
"=",
"o",
".",
"_signed_bounds",
"(",
")",
"ret",
"=",
"[",
"]",
"for",
"lb_1",
",",
"ub_1",
"in",
"signed_bounds_1",
":",
"for",
"lb_2",
",",
"ub_2",
"in",
"signed_bounds_2",
":",
"if",
"ub_1",
"<",
"lb_2",
":",
"ret",
".",
"append",
"(",
"TrueResult",
"(",
")",
")",
"elif",
"lb_1",
">=",
"ub_2",
":",
"ret",
".",
"append",
"(",
"FalseResult",
"(",
")",
")",
"else",
":",
"ret",
".",
"append",
"(",
"MaybeResult",
"(",
")",
")",
"if",
"all",
"(",
"r",
".",
"identical",
"(",
"TrueResult",
"(",
")",
")",
"for",
"r",
"in",
"ret",
")",
":",
"return",
"TrueResult",
"(",
")",
"elif",
"all",
"(",
"r",
".",
"identical",
"(",
"FalseResult",
"(",
")",
")",
"for",
"r",
"in",
"ret",
")",
":",
"return",
"FalseResult",
"(",
")",
"else",
":",
"return",
"MaybeResult",
"(",
")"
] | Signed less than
:param o: The other operand
:return: TrueResult(), FalseResult(), or MaybeResult() | [
"Signed",
"less",
"than"
] | train | https://github.com/angr/claripy/blob/4ed61924880af1ea8fb778047d896ec0156412a6/claripy/vsa/strided_interval.py#L712-L738 | 0.003645 |
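Stripped of the claripy result types, SLT is a three-valued comparison over every pair of signed sub-interval bounds. A standalone sketch of that logic, using True/False/None in place of TrueResult/FalseResult/MaybeResult:

def interval_slt(bounds1, bounds2):
    results = set()
    for lb1, ub1 in bounds1:
        for lb2, ub2 in bounds2:
            if ub1 < lb2:
                results.add(True)    # every value of the first is smaller
            elif lb1 >= ub2:
                results.add(False)   # mirrors the lb_1 >= ub_2 branch above
            else:
                results.add(None)    # overlap: outcome depends on the value
    if results == {True}:
        return True
    if results == {False}:
        return False
    return None

print(interval_slt([(-4, -1)], [(0, 10)]))  # True
print(interval_slt([(-4, 5)], [(0, 10)]))   # None, i.e. maybe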
oscarlazoarjona/fast | fast/angular_momentum.py | coupling_matrix_3j | def coupling_matrix_3j(j1, j2, j3):
ur"""For angular momenta $j_1, j_2, j_3$ the unitary transformation from the \
uncoupled basis into the $j = (j_1 \oplus j_2)\oplus j_3$ coupled basis.
>>> from sympy import Integer, pprint
>>> L = 0
>>> S = 1/Integer(2)
>>> II = 3/Integer(2)
>>> pprint(coupling_matrix_3j(L, S, II))
⎡ √3 ⎤
⎢0 -1/2 0 0 ── 0 0 0⎥
⎢ 2 ⎥
⎢ ⎥
⎢ -√2 √2 ⎥
⎢0 0 ──── 0 0 ── 0 0⎥
⎢ 2 2 ⎥
⎢ ⎥
⎢ -√3 ⎥
⎢0 0 0 ──── 0 0 1/2 0⎥
⎢ 2 ⎥
⎢ ⎥
⎢1 0 0 0 0 0 0 0⎥
⎢ ⎥
⎢ √3 ⎥
⎢0 ── 0 0 1/2 0 0 0⎥
⎢ 2 ⎥
⎢ ⎥
⎢ √2 √2 ⎥
⎢0 0 ── 0 0 ── 0 0⎥
⎢ 2 2 ⎥
⎢ ⎥
⎢ √3 ⎥
⎢0 0 0 1/2 0 0 ── 0⎥
⎢ 2 ⎥
⎢ ⎥
⎣0 0 0 0 0 0 0 1⎦
"""
idj3 = eye(2*j3+1)
Jper = perm_j(j1, j2)
U_Jj3_list = [coupling_matrix_2j(J, j3) for J in Jper]
size = sum([U_Jj3_list[i].shape[0] for i in range(len(Jper))])
U_Jj3 = zeros(size, size)
ind0 = 0
for i, U_Jj3i in enumerate(U_Jj3_list):
sizeJ = U_Jj3i.shape[0]
indf = ind0 + sizeJ
U_Jj3[ind0: indf, ind0: indf] = U_Jj3_list[i]
ind0 = indf
return U_Jj3*TensorProduct(coupling_matrix_2j(j1, j2), idj3) | python | def coupling_matrix_3j(j1, j2, j3):
ur"""For angular momenta $j_1, j_2, j_3$ the unitary transformation from the \
uncoupled basis into the $j = (j_1 \oplus j_2)\oplus j_3$ coupled basis.
>>> from sympy import Integer, pprint
>>> L = 0
>>> S = 1/Integer(2)
>>> II = 3/Integer(2)
>>> pprint(coupling_matrix_3j(L, S, II))
⎡ √3 ⎤
⎢0 -1/2 0 0 ── 0 0 0⎥
⎢ 2 ⎥
⎢ ⎥
⎢ -√2 √2 ⎥
⎢0 0 ──── 0 0 ── 0 0⎥
⎢ 2 2 ⎥
⎢ ⎥
⎢ -√3 ⎥
⎢0 0 0 ──── 0 0 1/2 0⎥
⎢ 2 ⎥
⎢ ⎥
⎢1 0 0 0 0 0 0 0⎥
⎢ ⎥
⎢ √3 ⎥
⎢0 ── 0 0 1/2 0 0 0⎥
⎢ 2 ⎥
⎢ ⎥
⎢ √2 √2 ⎥
⎢0 0 ── 0 0 ── 0 0⎥
⎢ 2 2 ⎥
⎢ ⎥
⎢ √3 ⎥
⎢0 0 0 1/2 0 0 ── 0⎥
⎢ 2 ⎥
⎢ ⎥
⎣0 0 0 0 0 0 0 1⎦
"""
idj3 = eye(2*j3+1)
Jper = perm_j(j1, j2)
U_Jj3_list = [coupling_matrix_2j(J, j3) for J in Jper]
size = sum([U_Jj3_list[i].shape[0] for i in range(len(Jper))])
U_Jj3 = zeros(size, size)
ind0 = 0
for i, U_Jj3i in enumerate(U_Jj3_list):
sizeJ = U_Jj3i.shape[0]
indf = ind0 + sizeJ
U_Jj3[ind0: indf, ind0: indf] = U_Jj3_list[i]
ind0 = indf
return U_Jj3*TensorProduct(coupling_matrix_2j(j1, j2), idj3) | [
"def",
"coupling_matrix_3j",
"(",
"j1",
",",
"j2",
",",
"j3",
")",
":",
"idj3",
"=",
"eye",
"(",
"2",
"*",
"j3",
"+",
"1",
")",
"Jper",
"=",
"perm_j",
"(",
"j1",
",",
"j2",
")",
"U_Jj3_list",
"=",
"[",
"coupling_matrix_2j",
"(",
"J",
",",
"j3",
")",
"for",
"J",
"in",
"Jper",
"]",
"size",
"=",
"sum",
"(",
"[",
"U_Jj3_list",
"[",
"i",
"]",
".",
"shape",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"Jper",
")",
")",
"]",
")",
"U_Jj3",
"=",
"zeros",
"(",
"size",
",",
"size",
")",
"ind0",
"=",
"0",
"for",
"i",
",",
"U_Jj3i",
"in",
"enumerate",
"(",
"U_Jj3_list",
")",
":",
"sizeJ",
"=",
"U_Jj3i",
".",
"shape",
"[",
"0",
"]",
"indf",
"=",
"ind0",
"+",
"sizeJ",
"U_Jj3",
"[",
"ind0",
":",
"indf",
",",
"ind0",
":",
"indf",
"]",
"=",
"U_Jj3_list",
"[",
"i",
"]",
"ind0",
"=",
"indf",
"return",
"U_Jj3",
"*",
"TensorProduct",
"(",
"coupling_matrix_2j",
"(",
"j1",
",",
"j2",
")",
",",
"idj3",
")"
] | ur"""For angular momenta $j_1, j_2, j_3$ the unitary transformation from the \
uncoupled basis into the $j = (j_1 \oplus j_2)\oplus j_3$ coupled basis.
>>> from sympy import Integer, pprint
>>> L = 0
>>> S = 1/Integer(2)
>>> II = 3/Integer(2)
>>> pprint(coupling_matrix_3j(L, S, II))
⎡ √3 ⎤
⎢0 -1/2 0 0 ── 0 0 0⎥
⎢ 2 ⎥
⎢ ⎥
⎢ -√2 √2 ⎥
⎢0 0 ──── 0 0 ── 0 0⎥
⎢ 2 2 ⎥
⎢ ⎥
⎢ -√3 ⎥
⎢0 0 0 ──── 0 0 1/2 0⎥
⎢ 2 ⎥
⎢ ⎥
⎢1 0 0 0 0 0 0 0⎥
⎢ ⎥
⎢ √3 ⎥
⎢0 ── 0 0 1/2 0 0 0⎥
⎢ 2 ⎥
⎢ ⎥
⎢ √2 √2 ⎥
⎢0 0 ── 0 0 ── 0 0⎥
⎢ 2 2 ⎥
⎢ ⎥
⎢ √3 ⎥
⎢0 0 0 1/2 0 0 ── 0⎥
⎢ 2 ⎥
⎢ ⎥
⎣0 0 0 0 0 0 0 1⎦ | [
"ur",
"For",
"angular",
"momenta",
"$j_1",
"j_2",
"j_3$",
"the",
"unitary",
"transformation",
"from",
"the",
"\\",
"uncoupled",
"basis",
"into",
"the",
"$j",
"=",
"(",
"j_1",
"\\",
"oplus",
"j_2",
")",
"\\",
"oplus",
"j_3$",
"coupled",
"basis",
"."
] | train | https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/angular_momentum.py#L116-L166 | 0.00202 |
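Since Clebsch-Gordan coefficients are real, the returned change-of-basis matrix should be orthogonal. A quick hedged check; the import path is inferred from this record's URL, not confirmed by it:

from sympy import Integer, eye, simplify

from fast.angular_momentum import coupling_matrix_3j  # inferred import path

U = coupling_matrix_3j(0, 1/Integer(2), 3/Integer(2))
assert simplify(U * U.T) == eye(8)  # orthogonal: U * U^T is the identity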
python-openxml/python-docx | docx/oxml/numbering.py | CT_Numbering.num_having_numId | def num_having_numId(self, numId):
"""
Return the ``<w:num>`` child element having ``numId`` attribute
matching *numId*.
"""
xpath = './w:num[@w:numId="%d"]' % numId
try:
return self.xpath(xpath)[0]
except IndexError:
raise KeyError('no <w:num> element with numId %d' % numId) | python | def num_having_numId(self, numId):
"""
Return the ``<w:num>`` child element having ``numId`` attribute
matching *numId*.
"""
xpath = './w:num[@w:numId="%d"]' % numId
try:
return self.xpath(xpath)[0]
except IndexError:
raise KeyError('no <w:num> element with numId %d' % numId) | [
"def",
"num_having_numId",
"(",
"self",
",",
"numId",
")",
":",
"xpath",
"=",
"'./w:num[@w:numId=\"%d\"]'",
"%",
"numId",
"try",
":",
"return",
"self",
".",
"xpath",
"(",
"xpath",
")",
"[",
"0",
"]",
"except",
"IndexError",
":",
"raise",
"KeyError",
"(",
"'no <w:num> element with numId %d'",
"%",
"numId",
")"
] | Return the ``<w:num>`` child element having ``numId`` attribute
matching *numId*. | [
"Return",
"the",
"<w",
":",
"num",
">",
"child",
"element",
"having",
"numId",
"attribute",
"matching",
"*",
"numId",
"*",
"."
] | train | https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/oxml/numbering.py#L108-L117 | 0.005618 |
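The w: prefix in that XPath resolves against the WordprocessingML namespace; the same lookup with bare lxml, outside python-docx:

from lxml import etree

W = "http://schemas.openxmlformats.org/wordprocessingml/2006/main"
numbering = etree.fromstring(
    '<w:numbering xmlns:w="%s">'
    '<w:num w:numId="3"/><w:num w:numId="7"/>'
    '</w:numbering>' % W)
matches = numbering.xpath('./w:num[@w:numId="3"]', namespaces={"w": W})
print(matches[0].get("{%s}numId" % W))  # '3'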
openfisca/openfisca-survey-manager | openfisca_survey_manager/input_dataframe_generator.py | randomly_init_variable | def randomly_init_variable(tax_benefit_system, input_dataframe_by_entity, variable_name, max_value, condition = None, seed = None):
"""
Initialise a variable with random values (from 0 to max_value).
If a condition vector is provided, only set the value of persons or groups for which condition is True.
Example:
>>> from openfisca_survey_manager.input_dataframe_generator import make_input_dataframe_by_entity
>>> from openfisca_country_template import CountryTaxBenefitSystem
>>> tbs = CountryTaxBenefitSystem()
>>> input_dataframe_by_entity = make_input_dataframe_by_entity(tbs, 400, 100)
>>> randomly_init_variable(tbs, input_dataframe_by_entity, 'salary', max_value = 50000, condition = "household_role == 'first_parent'") # Randomly set a salary between 0 and 50000 for first parents only
>>> sorted(input_dataframe_by_entity['person'].columns.tolist())
['household_id', 'household_legacy_role', 'household_role', 'person_id', 'salary']
>>> input_dataframe_by_entity['person'].salary.max() <= 50000
True
>>> len(input_dataframe_by_entity['person'].salary)
400
>>> randomly_init_variable(tbs, input_dataframe_by_entity, 'rent', max_value = 1000)
>>> sorted(input_dataframe_by_entity['household'].columns.tolist())
['rent']
>>> input_dataframe_by_entity['household'].rent.max() <= 1000
True
>>> input_dataframe_by_entity['household'].rent.max() >= 1
True
>>> len(input_dataframe_by_entity['household'].rent)
100
"""
variable = tax_benefit_system.variables[variable_name]
entity = variable.entity
if condition is None:
condition = True
else:
condition = input_dataframe_by_entity[entity.key].eval(condition).values
if seed is None:
seed = 42
np.random.seed(seed)
count = len(input_dataframe_by_entity[entity.key])
value = (np.random.rand(count) * max_value * condition).astype(variable.dtype)
input_dataframe_by_entity[entity.key][variable_name] = value | python | def randomly_init_variable(tax_benefit_system, input_dataframe_by_entity, variable_name, max_value, condition = None, seed = None):
"""
Initialise a variable with random values (from 0 to max_value).
If a condition vector is provided, only set the value of persons or groups for which condition is True.
Example:
>>> from openfisca_survey_manager.input_dataframe_generator import make_input_dataframe_by_entity
>>> from openfisca_country_template import CountryTaxBenefitSystem
>>> tbs = CountryTaxBenefitSystem()
>>> input_dataframe_by_entity = make_input_dataframe_by_entity(tbs, 400, 100)
>>> randomly_init_variable(tbs, input_dataframe_by_entity, 'salary', max_value = 50000, condition = "household_role == 'first_parent'") # Randomly set a salary between 0 and 50000 for first parents only
>>> sorted(input_dataframe_by_entity['person'].columns.tolist())
['household_id', 'household_legacy_role', 'household_role', 'person_id', 'salary']
>>> input_dataframe_by_entity['person'].salary.max() <= 50000
True
>>> len(input_dataframe_by_entity['person'].salary)
400
>>> randomly_init_variable(tbs, input_dataframe_by_entity, 'rent', max_value = 1000)
>>> sorted(input_dataframe_by_entity['household'].columns.tolist())
['rent']
>>> input_dataframe_by_entity['household'].rent.max() <= 1000
True
>>> input_dataframe_by_entity['household'].rent.max() >= 1
True
>>> len(input_dataframe_by_entity['household'].rent)
100
"""
variable = tax_benefit_system.variables[variable_name]
entity = variable.entity
if condition is None:
condition = True
else:
condition = input_dataframe_by_entity[entity.key].eval(condition).values
if seed is None:
seed = 42
np.random.seed(seed)
count = len(input_dataframe_by_entity[entity.key])
value = (np.random.rand(count) * max_value * condition).astype(variable.dtype)
input_dataframe_by_entity[entity.key][variable_name] = value | [
"def",
"randomly_init_variable",
"(",
"tax_benefit_system",
",",
"input_dataframe_by_entity",
",",
"variable_name",
",",
"max_value",
",",
"condition",
"=",
"None",
",",
"seed",
"=",
"None",
")",
":",
"variable",
"=",
"tax_benefit_system",
".",
"variables",
"[",
"variable_name",
"]",
"entity",
"=",
"variable",
".",
"entity",
"if",
"condition",
"is",
"None",
":",
"condition",
"=",
"True",
"else",
":",
"condition",
"=",
"input_dataframe_by_entity",
"[",
"entity",
".",
"key",
"]",
".",
"eval",
"(",
"condition",
")",
".",
"values",
"if",
"seed",
"is",
"None",
":",
"seed",
"=",
"42",
"np",
".",
"random",
".",
"seed",
"(",
"seed",
")",
"count",
"=",
"len",
"(",
"input_dataframe_by_entity",
"[",
"entity",
".",
"key",
"]",
")",
"value",
"=",
"(",
"np",
".",
"random",
".",
"rand",
"(",
"count",
")",
"*",
"max_value",
"*",
"condition",
")",
".",
"astype",
"(",
"variable",
".",
"dtype",
")",
"input_dataframe_by_entity",
"[",
"entity",
".",
"key",
"]",
"[",
"variable_name",
"]",
"=",
"value"
] | Initialise a variable with random values (from 0 to max_value).
If a condition vector is provided, only set the value of persons or groups for which condition is True.
Example:
>>> from openfisca_survey_manager.input_dataframe_generator import make_input_dataframe_by_entity
>>> from openfisca_country_template import CountryTaxBenefitSystem
>>> tbs = CountryTaxBenefitSystem()
>>> input_dataframe_by_entity = make_input_dataframe_by_entity(tbs, 400, 100)
>>> randomly_init_variable(tbs, input_dataframe_by_entity, 'salary', max_value = 50000, condition = "household_role == 'first_parent'") # Randomly set a salary between 0 and 50000 for first parents only
>>> sorted(input_dataframe_by_entity['person'].columns.tolist())
['household_id', 'household_legacy_role', 'household_role', 'person_id', 'salary']
>>> input_dataframe_by_entity['person'].salary.max() <= 50000
True
>>> len(input_dataframe_by_entity['person'].salary)
400
>>> randomly_init_variable(tbs, input_dataframe_by_entity, 'rent', max_value = 1000)
>>> sorted(input_dataframe_by_entity['household'].columns.tolist())
['rent']
>>> input_dataframe_by_entity['household'].rent.max() <= 1000
True
>>> input_dataframe_by_entity['household'].rent.max() >= 1
True
>>> len(input_dataframe_by_entity['household'].rent)
100 | [
"Initialise",
"a",
"variable",
"with",
"random",
"values",
"(",
"from",
"0",
"to",
"max_value",
")",
".",
"If",
"a",
"condition",
"vector",
"is",
"provided",
"only",
"set",
"the",
"value",
"of",
"persons",
"or",
"groups",
"for",
"which",
"condition",
"is",
"True",
"."
] | train | https://github.com/openfisca/openfisca-survey-manager/blob/bed6c65dc5e4ec2bdc9cda5b865fefd9e3d0c358/openfisca_survey_manager/input_dataframe_generator.py#L124-L167 | 0.00661 |
qweeze/wex-api-client | wex/client.py | Client.trans_history | def trans_history(
self, from_=None, count=None, from_id=None, end_id=None,
order=None, since=None, end=None
):
"""
Returns the history of transactions.
To use this method you need a privilege of the info key.
:param int or None from_: transaction ID, from which the display starts (default 0)
:param int or None count: number of transactions to be displayed (default 1000)
:param int or None from_id: transaction ID, from which the display starts (default 0)
:param int or None end_id: transaction ID on which the display ends (default inf.)
:param str or None order: sorting (default 'DESC')
:param int or None since: the time to start the display (default 0)
:param int or None end: the time to end the display (default inf.)
"""
return self._trade_api_call(
'TransHistory', from_=from_, count=count, from_id=from_id, end_id=end_id,
order=order, since=since, end=end
) | python | def trans_history(
self, from_=None, count=None, from_id=None, end_id=None,
order=None, since=None, end=None
):
"""
Returns the history of transactions.
To use this method you need a privilege of the info key.
:param int or None from_: transaction ID, from which the display starts (default 0)
:param int or None count: number of transactions to be displayed (default 1000)
:param int or None from_id: transaction ID, from which the display starts (default 0)
:param int or None end_id: transaction ID on which the display ends (default inf.)
:param str or None order: sorting (default 'DESC')
:param int or None since: the time to start the display (default 0)
:param int or None end: the time to end the display (default inf.)
"""
return self._trade_api_call(
'TransHistory', from_=from_, count=count, from_id=from_id, end_id=end_id,
order=order, since=since, end=end
) | [
"def",
"trans_history",
"(",
"self",
",",
"from_",
"=",
"None",
",",
"count",
"=",
"None",
",",
"from_id",
"=",
"None",
",",
"end_id",
"=",
"None",
",",
"order",
"=",
"None",
",",
"since",
"=",
"None",
",",
"end",
"=",
"None",
")",
":",
"return",
"self",
".",
"_trade_api_call",
"(",
"'TransHistory'",
",",
"from_",
"=",
"from_",
",",
"count",
"=",
"count",
",",
"from_id",
"=",
"from_id",
",",
"end_id",
"=",
"end_id",
",",
"order",
"=",
"order",
",",
"since",
"=",
"since",
",",
"end",
"=",
"end",
")"
] | Returns the history of transactions.
To use this method you need a privilege of the info key.
:param int or None from_: transaction ID, from which the display starts (default 0)
:param int or None count: number of transactions to be displayed (default 1000)
:param int or None from_id: transaction ID, from which the display starts (default 0)
:param int or None end_id: transaction ID on which the display ends (default inf.)
:param str or None order: sorting (default 'DESC')
:param int or None since: the time to start the display (default 0)
:param int or None end: the time to end the display (default inf.) | [
"Returns",
"the",
"history",
"of",
"transactions",
".",
"To",
"use",
"this",
"method",
"you",
"need",
"a",
"privilege",
"of",
"the",
"info",
"key",
"."
] | train | https://github.com/qweeze/wex-api-client/blob/e84d139be229aab2c7c5eda5976b812be651807b/wex/client.py#L182-L201 | 0.007851 |
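A hedged usage sketch; the constructor arguments are assumptions (the record only shows this method), and the key must carry the info privilege the docstring mentions:

client = Client(api_key="...", api_secret="...")  # constructor args assumed
# Fetch the ten most recent transactions; unset filters stay None and are
# simply not sent, matching the keyword handling above.
print(client.trans_history(count=10, order="DESC"))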
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | KaggleApi.process_response | def process_response(self, result):
""" process a response from the API. We check the API version against
the client's to see if it's old, and give them a warning (once)
Parameters
==========
result: the result from the API
"""
if len(result) == 3:
data = result[0]
headers = result[2]
if self.HEADER_API_VERSION in headers:
api_version = headers[self.HEADER_API_VERSION]
if (not self.already_printed_version_warning
and not self.is_up_to_date(api_version)):
print('Warning: Looks like you\'re using an outdated API '
'Version, please consider updating (server ' +
api_version + ' / client ' + self.__version__ + ')')
self.already_printed_version_warning = True
return data
return result | python | def process_response(self, result):
""" process a response from the API. We check the API version against
the client's to see if it's old, and give them a warning (once)
Parameters
==========
result: the result from the API
"""
if len(result) == 3:
data = result[0]
headers = result[2]
if self.HEADER_API_VERSION in headers:
api_version = headers[self.HEADER_API_VERSION]
if (not self.already_printed_version_warning
and not self.is_up_to_date(api_version)):
print('Warning: Looks like you\'re using an outdated API '
'Version, please consider updating (server ' +
api_version + ' / client ' + self.__version__ + ')')
self.already_printed_version_warning = True
return data
return result | [
"def",
"process_response",
"(",
"self",
",",
"result",
")",
":",
"if",
"len",
"(",
"result",
")",
"==",
"3",
":",
"data",
"=",
"result",
"[",
"0",
"]",
"headers",
"=",
"result",
"[",
"2",
"]",
"if",
"self",
".",
"HEADER_API_VERSION",
"in",
"headers",
":",
"api_version",
"=",
"headers",
"[",
"self",
".",
"HEADER_API_VERSION",
"]",
"if",
"(",
"not",
"self",
".",
"already_printed_version_warning",
"and",
"not",
"self",
".",
"is_up_to_date",
"(",
"api_version",
")",
")",
":",
"print",
"(",
"'Warning: Looks like you\\'re using an outdated API '",
"'Version, please consider updating (server '",
"+",
"api_version",
"+",
"' / client '",
"+",
"self",
".",
"__version__",
"+",
"')'",
")",
"self",
".",
"already_printed_version_warning",
"=",
"True",
"return",
"data",
"return",
"result"
] | process a response from the API. We check the API version against
the client's to see if it's old, and give them a warning (once)
Parameters
==========
result: the result from the API | [
"process",
"a",
"response",
"from",
"the",
"API",
".",
"We",
"check",
"the",
"API",
"version",
"against",
"the",
"client",
"s",
"to",
"see",
"if",
"it",
"s",
"old",
"and",
"give",
"them",
"a",
"warning",
"(",
"once",
")"
] | train | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2181-L2201 | 0.002075 |
manns/pyspread | pyspread/src/model/model.py | DataArray.get_row_height | def get_row_height(self, row, tab):
"""Returns row height"""
try:
return self.row_heights[(row, tab)]
except KeyError:
return config["default_row_height"] | python | def get_row_height(self, row, tab):
"""Returns row height"""
try:
return self.row_heights[(row, tab)]
except KeyError:
return config["default_row_height"] | [
"def",
"get_row_height",
"(",
"self",
",",
"row",
",",
"tab",
")",
":",
"try",
":",
"return",
"self",
".",
"row_heights",
"[",
"(",
"row",
",",
"tab",
")",
"]",
"except",
"KeyError",
":",
"return",
"config",
"[",
"\"default_row_height\"",
"]"
] | Returns row height | [
"Returns",
"row",
"height"
] | train | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/model/model.py#L465-L472 | 0.009804 |
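The try/except above is the classic dictionary-default idiom; dict.get with a fallback expresses the same thing in one line:

row_heights = {(0, 0): 25.0}
default_row_height = 17.0

def get_row_height(row, tab):
    # Equivalent to the try/except KeyError version above.
    return row_heights.get((row, tab), default_row_height)

print(get_row_height(0, 0))  # 25.0
print(get_row_height(5, 2))  # 17.0 (fallback)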
pydanny-archive/django-uni-form | uni_form/utils.py | render_field | def render_field(field, form, form_style, context, template=None, labelclass=None, layout_object=None):
"""
Renders a django-uni-form field
:param field: Can be a string or a Layout object like `Row`. If it's a layout
object, we call its render method, otherwise we instantiate a BoundField
and render it using default template 'uni_form/field.html'
The field is added to a list that the form holds called `rendered_fields`
to avoid double rendering fields.
:param form: The form/formset to which that field belongs to.
:param form_style: We need this to render uni-form divs using helper's chosen
style.
:template: Template used for rendering the field.
:layout_object: If passed, it points to the Layout object that is being rendered.
We use it to store its bound fields in a list called `layout_object.bound_fields`
"""
FAIL_SILENTLY = getattr(settings, 'UNIFORM_FAIL_SILENTLY', True)
if hasattr(field, 'render'):
return field.render(form, form_style, context)
else:
# This allows fields to be unicode strings, as long as they don't use non-ASCII characters
try:
if isinstance(field, unicode):
field = str(field)
# If `field` is not unicode then we turn it into a unicode string, otherwise doing
# str(field) would give no error and the field would not be resolved, causing confusion
else:
field = str(unicode(field))
except (UnicodeEncodeError, UnicodeDecodeError):
raise Exception("Field '%s' is using forbidden unicode characters" % field)
try:
field_instance = form.fields[field]
except KeyError:
if not FAIL_SILENTLY:
raise Exception("Could not resolve form field '%s'." % field)
else:
field_instance = None
logging.warning("Could not resolve form field '%s'." % field, exc_info=sys.exc_info())
if not field in form.rendered_fields:
form.rendered_fields.append(field)
else:
if not FAIL_SILENTLY:
raise Exception("A field should only be rendered once: %s" % field)
else:
logging.warning("A field should only be rendered once: %s" % field, exc_info=sys.exc_info())
if field_instance is None:
html = ''
else:
bound_field = BoundField(form, field_instance, field)
if template is None:
template = default_field_template
else:
template = get_template(template)
# We save the Layout object's bound fields in the layout object's `bound_fields` list
if layout_object is not None:
layout_object.bound_fields.append(bound_field)
html = template.render(Context({'field': bound_field, 'labelclass': labelclass}))
return html | python | def render_field(field, form, form_style, context, template=None, labelclass=None, layout_object=None):
"""
Renders a django-uni-form field
:param field: Can be a string or a Layout object like `Row`. If it's a layout
object, we call its render method, otherwise we instantiate a BoundField
and render it using default template 'uni_form/field.html'
The field is added to a list that the form holds called `rendered_fields`
to avoid double rendering fields.
:param form: The form/formset to which that field belongs to.
:param form_style: We need this to render uni-form divs using helper's chosen
style.
:template: Template used for rendering the field.
:layout_object: If passed, it points to the Layout object that is being rendered.
We use it to store its bound fields in a list called `layout_object.bound_fields`
"""
FAIL_SILENTLY = getattr(settings, 'UNIFORM_FAIL_SILENTLY', True)
if hasattr(field, 'render'):
return field.render(form, form_style, context)
else:
# This allows fields to be unicode strings, as long as they don't use non-ASCII characters
try:
if isinstance(field, unicode):
field = str(field)
# If `field` is not unicode then we turn it into a unicode string, otherwise doing
# str(field) would give no error and the field would not be resolved, causing confusion
else:
field = str(unicode(field))
except (UnicodeEncodeError, UnicodeDecodeError):
raise Exception("Field '%s' is using forbidden unicode characters" % field)
try:
field_instance = form.fields[field]
except KeyError:
if not FAIL_SILENTLY:
raise Exception("Could not resolve form field '%s'." % field)
else:
field_instance = None
logging.warning("Could not resolve form field '%s'." % field, exc_info=sys.exc_info())
if not field in form.rendered_fields:
form.rendered_fields.append(field)
else:
if not FAIL_SILENTLY:
raise Exception("A field should only be rendered once: %s" % field)
else:
logging.warning("A field should only be rendered once: %s" % field, exc_info=sys.exc_info())
if field_instance is None:
html = ''
else:
bound_field = BoundField(form, field_instance, field)
if template is None:
template = default_field_template
else:
template = get_template(template)
# We save the Layout object's bound fields in the layout object's `bound_fields` list
if layout_object is not None:
layout_object.bound_fields.append(bound_field)
html = template.render(Context({'field': bound_field, 'labelclass': labelclass}))
return html | [
"def",
"render_field",
"(",
"field",
",",
"form",
",",
"form_style",
",",
"context",
",",
"template",
"=",
"None",
",",
"labelclass",
"=",
"None",
",",
"layout_object",
"=",
"None",
")",
":",
"FAIL_SILENTLY",
"=",
"getattr",
"(",
"settings",
",",
"'UNIFORM_FAIL_SILENTLY'",
",",
"True",
")",
"if",
"hasattr",
"(",
"field",
",",
"'render'",
")",
":",
"return",
"field",
".",
"render",
"(",
"form",
",",
"form_style",
",",
"context",
")",
"else",
":",
"# This allows fields to be unicode strings, always they don't use non ASCII",
"try",
":",
"if",
"isinstance",
"(",
"field",
",",
"unicode",
")",
":",
"field",
"=",
"str",
"(",
"field",
")",
"# If `field` is not unicode then we turn it into a unicode string, otherwise doing",
"# str(field) would give no error and the field would not be resolved, causing confusion ",
"else",
":",
"field",
"=",
"str",
"(",
"unicode",
"(",
"field",
")",
")",
"except",
"(",
"UnicodeEncodeError",
",",
"UnicodeDecodeError",
")",
":",
"raise",
"Exception",
"(",
"\"Field '%s' is using forbidden unicode characters\"",
"%",
"field",
")",
"try",
":",
"field_instance",
"=",
"form",
".",
"fields",
"[",
"field",
"]",
"except",
"KeyError",
":",
"if",
"not",
"FAIL_SILENTLY",
":",
"raise",
"Exception",
"(",
"\"Could not resolve form field '%s'.\"",
"%",
"field",
")",
"else",
":",
"field_instance",
"=",
"None",
"logging",
".",
"warning",
"(",
"\"Could not resolve form field '%s'.\"",
"%",
"field",
",",
"exc_info",
"=",
"sys",
".",
"exc_info",
"(",
")",
")",
"if",
"not",
"field",
"in",
"form",
".",
"rendered_fields",
":",
"form",
".",
"rendered_fields",
".",
"append",
"(",
"field",
")",
"else",
":",
"if",
"not",
"FAIL_SILENTLY",
":",
"raise",
"Exception",
"(",
"\"A field should only be rendered once: %s\"",
"%",
"field",
")",
"else",
":",
"logging",
".",
"warning",
"(",
"\"A field should only be rendered once: %s\"",
"%",
"field",
",",
"exc_info",
"=",
"sys",
".",
"exc_info",
"(",
")",
")",
"if",
"field_instance",
"is",
"None",
":",
"html",
"=",
"''",
"else",
":",
"bound_field",
"=",
"BoundField",
"(",
"form",
",",
"field_instance",
",",
"field",
")",
"if",
"template",
"is",
"None",
":",
"template",
"=",
"default_field_template",
"else",
":",
"template",
"=",
"get_template",
"(",
"template",
")",
"# We save the Layout object's bound fields in the layout object's `bound_fields` list",
"if",
"layout_object",
"is",
"not",
"None",
":",
"layout_object",
".",
"bound_fields",
".",
"append",
"(",
"bound_field",
")",
"html",
"=",
"template",
".",
"render",
"(",
"Context",
"(",
"{",
"'field'",
":",
"bound_field",
",",
"'labelclass'",
":",
"labelclass",
"}",
")",
")",
"return",
"html"
] | Renders a django-uni-form field
:param field: Can be a string or a Layout object like `Row`. If it's a layout
object, we call its render method, otherwise we instantiate a BoundField
and render it using default template 'uni_form/field.html'
The field is added to a list that the form holds called `rendered_fields`
to avoid double rendering fields.
:param form: The form/formset to which that field belongs to.
:param form_style: We need this to render uni-form divs using helper's chosen
style.
:template: Template used for rendering the field.
:layout_object: If passed, it points to the Layout object that is being rendered.
We use it to store its bound fields in a list called `layout_object.bound_fields` | [
"Renders",
"a",
"django",
"-",
"uni",
"-",
"form",
"field",
":",
"param",
"field",
":",
"Can",
"be",
"a",
"string",
"or",
"a",
"Layout",
"object",
"like",
"Row",
".",
"If",
"it",
"s",
"a",
"layout",
"object",
"we",
"call",
"its",
"render",
"method",
"otherwise",
"we",
"instantiate",
"a",
"BoundField",
"and",
"render",
"it",
"using",
"default",
"template",
"uni_form",
"/",
"field",
".",
"html",
"The",
"field",
"is",
"added",
"to",
"a",
"list",
"that",
"the",
"form",
"holds",
"called",
"rendered_fields",
"to",
"avoid",
"double",
"rendering",
"fields",
"."
] | train | https://github.com/pydanny-archive/django-uni-form/blob/159f539e2fb98752b7964d75e955fc62881c28fb/uni_form/utils.py#L14-L84 | 0.008253 |
google/python-gflags | gflags/flagvalues.py | FlagValues.FlagsIntoString | def FlagsIntoString(self):
"""Returns a string with the flags assignments from this FlagValues object.
This function ignores flags whose value is None. Each flag
assignment is separated by a newline.
NOTE: MUST mirror the behavior of the C++ CommandlineFlagsIntoString
from http://code.google.com/p/google-gflags
Returns:
string with the flags assignments from this FlagValues object.
"""
s = ''
for flag in self.FlagDict().values():
if flag.value is not None:
s += flag.serialize() + '\n'
return s | python | def FlagsIntoString(self):
"""Returns a string with the flags assignments from this FlagValues object.
This function ignores flags whose value is None. Each flag
assignment is separated by a newline.
NOTE: MUST mirror the behavior of the C++ CommandlineFlagsIntoString
from http://code.google.com/p/google-gflags
Returns:
string with the flags assignments from this FlagValues object.
"""
s = ''
for flag in self.FlagDict().values():
if flag.value is not None:
s += flag.serialize() + '\n'
return s | [
"def",
"FlagsIntoString",
"(",
"self",
")",
":",
"s",
"=",
"''",
"for",
"flag",
"in",
"self",
".",
"FlagDict",
"(",
")",
".",
"values",
"(",
")",
":",
"if",
"flag",
".",
"value",
"is",
"not",
"None",
":",
"s",
"+=",
"flag",
".",
"serialize",
"(",
")",
"+",
"'\\n'",
"return",
"s"
] | Returns a string with the flags assignments from this FlagValues object.
This function ignores flags whose value is None. Each flag
assignment is separated by a newline.
NOTE: MUST mirror the behavior of the C++ CommandlineFlagsIntoString
from http://code.google.com/p/google-gflags
Returns:
string with the flags assignments from this FlagValues object. | [
"Returns",
"a",
"string",
"with",
"the",
"flags",
"assignments",
"from",
"this",
"FlagValues",
"object",
"."
] | train | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/flagvalues.py#L1178-L1194 | 0.003559 |
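Each serialized assignment is a valid flagfile line, so the output can be written out and replayed with --flagfile. A hedged sketch of that round-trip; the flag and file name are made up:

import sys

import gflags

gflags.DEFINE_string("name", "world", "who to greet")
FLAGS = gflags.FLAGS
FLAGS(sys.argv)  # parse the command line, keeping defaults otherwise

# One '--flag=value' line per flag; None-valued flags are skipped, as above.
with open("saved.flagfile", "w") as handle:
    handle.write(FLAGS.FlagsIntoString())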
RedFantom/ttkwidgets | ttkwidgets/timeline.py | TimeLine.draw_categories | def draw_categories(self):
"""Draw the category labels on the Canvas"""
for label in self._category_labels.values():
label.destroy()
self._category_labels.clear()
canvas_width = 0
for category in (sorted(self._categories.keys() if isinstance(self._categories, dict) else self._categories)
if not isinstance(self._categories, OrderedDict)
else self._categories):
kwargs = self._categories[category] if isinstance(self._categories, dict) else {"text": category}
kwargs["background"] = kwargs.get("background", self._background)
kwargs["justify"] = kwargs.get("justify", tk.LEFT)
label = ttk.Label(self._frame_categories, **kwargs)
width = label.winfo_reqwidth()
canvas_width = width if width > canvas_width else canvas_width
self._category_labels[category] = label
self._canvas_categories.create_window(0, 0, window=self._frame_categories, anchor=tk.NW)
self._canvas_categories.config(width=canvas_width + 5, height=self._height) | python | def draw_categories(self):
"""Draw the category labels on the Canvas"""
for label in self._category_labels.values():
label.destroy()
self._category_labels.clear()
canvas_width = 0
for category in (sorted(self._categories.keys() if isinstance(self._categories, dict) else self._categories)
if not isinstance(self._categories, OrderedDict)
else self._categories):
kwargs = self._categories[category] if isinstance(self._categories, dict) else {"text": category}
kwargs["background"] = kwargs.get("background", self._background)
kwargs["justify"] = kwargs.get("justify", tk.LEFT)
label = ttk.Label(self._frame_categories, **kwargs)
width = label.winfo_reqwidth()
canvas_width = width if width > canvas_width else canvas_width
self._category_labels[category] = label
self._canvas_categories.create_window(0, 0, window=self._frame_categories, anchor=tk.NW)
self._canvas_categories.config(width=canvas_width + 5, height=self._height) | [
"def",
"draw_categories",
"(",
"self",
")",
":",
"for",
"label",
"in",
"self",
".",
"_category_labels",
".",
"values",
"(",
")",
":",
"label",
".",
"destroy",
"(",
")",
"self",
".",
"_category_labels",
".",
"clear",
"(",
")",
"canvas_width",
"=",
"0",
"for",
"category",
"in",
"(",
"sorted",
"(",
"self",
".",
"_categories",
".",
"keys",
"(",
")",
"if",
"isinstance",
"(",
"self",
".",
"_categories",
",",
"dict",
")",
"else",
"self",
".",
"_categories",
")",
"if",
"not",
"isinstance",
"(",
"self",
".",
"_categories",
",",
"OrderedDict",
")",
"else",
"self",
".",
"_categories",
")",
":",
"kwargs",
"=",
"self",
".",
"_categories",
"[",
"category",
"]",
"if",
"isinstance",
"(",
"self",
".",
"_categories",
",",
"dict",
")",
"else",
"{",
"\"text\"",
":",
"category",
"}",
"kwargs",
"[",
"\"background\"",
"]",
"=",
"kwargs",
".",
"get",
"(",
"\"background\"",
",",
"self",
".",
"_background",
")",
"kwargs",
"[",
"\"justify\"",
"]",
"=",
"kwargs",
".",
"get",
"(",
"\"justify\"",
",",
"tk",
".",
"LEFT",
")",
"label",
"=",
"ttk",
".",
"Label",
"(",
"self",
".",
"_frame_categories",
",",
"*",
"*",
"kwargs",
")",
"width",
"=",
"label",
".",
"winfo_reqwidth",
"(",
")",
"canvas_width",
"=",
"width",
"if",
"width",
">",
"canvas_width",
"else",
"canvas_width",
"self",
".",
"_category_labels",
"[",
"category",
"]",
"=",
"label",
"self",
".",
"_canvas_categories",
".",
"create_window",
"(",
"0",
",",
"0",
",",
"window",
"=",
"self",
".",
"_frame_categories",
",",
"anchor",
"=",
"tk",
".",
"NW",
")",
"self",
".",
"_canvas_categories",
".",
"config",
"(",
"width",
"=",
"canvas_width",
"+",
"5",
",",
"height",
"=",
"self",
".",
"_height",
")"
] | Draw the category labels on the Canvas | [
"Draw",
"the",
"category",
"labels",
"on",
"the",
"Canvas"
] | train | https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/timeline.py#L338-L355 | 0.005314 |
gabstopper/smc-python | smc/core/route.py | PolicyRoute.delete | def delete(self, **kw):
"""
Delete a policy route from the engine. You can delete using a
single field or multiple fields for a more exact match.
Use a keyword argument to delete a route by any valid attribute.
:param kw: use valid Route keyword values to delete by exact match
"""
delete_by = []
for field, val in kw.items():
if val is not None:
delete_by.append(field)
self.items[:] = [route for route in self.items
if not all(route.get(field) == kw.get(field)
for field in delete_by)] | python | def delete(self, **kw):
"""
Delete a policy route from the engine. You can delete using a
single field or multiple fields for a more exact match.
Use a keyword argument to delete a route by any valid attribute.
:param kw: use valid Route keyword values to delete by exact match
"""
delete_by = []
for field, val in kw.items():
if val is not None:
delete_by.append(field)
self.items[:] = [route for route in self.items
if not all(route.get(field) == kw.get(field)
for field in delete_by)] | [
"def",
"delete",
"(",
"self",
",",
"*",
"*",
"kw",
")",
":",
"delete_by",
"=",
"[",
"]",
"for",
"field",
",",
"val",
"in",
"kw",
".",
"items",
"(",
")",
":",
"if",
"val",
"is",
"not",
"None",
":",
"delete_by",
".",
"append",
"(",
"field",
")",
"self",
".",
"items",
"[",
":",
"]",
"=",
"[",
"route",
"for",
"route",
"in",
"self",
".",
"items",
"if",
"not",
"all",
"(",
"route",
".",
"get",
"(",
"field",
")",
"==",
"kw",
".",
"get",
"(",
"field",
")",
"for",
"field",
"in",
"delete_by",
")",
"]"
] | Delete a policy route from the engine. You can delete using a
single field or multiple fields for a more exact match.
Use a keyword argument to delete a route by any valid attribute.
:param kw: use valid Route keyword values to delete by exact match | [
"Delete",
"a",
"policy",
"route",
"from",
"the",
"engine",
".",
"You",
"can",
"delete",
"using",
"a",
"single",
"field",
"or",
"multiple",
"fields",
"for",
"a",
"more",
"exact",
"match",
".",
"Use",
"a",
"keyword",
"argument",
"to",
"delete",
"a",
"route",
"by",
"any",
"valid",
"attribute",
".",
":",
"param",
"kw",
":",
"use",
"valid",
"Route",
"keyword",
"values",
"to",
"delete",
"by",
"exact",
"match"
] | train | https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/core/route.py#L1048-L1063 | 0.006006 |
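Without the SMC types, the body reads "keep every item that fails an exact match on all provided fields". A standalone version over plain dicts, with made-up sample data:

routes = [
    {"gateway_ip": "10.0.0.1", "network": "192.168.1.0/24"},
    {"gateway_ip": "10.0.0.2", "network": "192.168.2.0/24"},
]

def delete(items, **kw):
    fields = [f for f, v in kw.items() if v is not None]
    # Caveat shared with the original: with no keywords, all() over an
    # empty list is True and every item is removed.
    items[:] = [item for item in items
                if not all(item.get(f) == kw.get(f) for f in fields)]

delete(routes, gateway_ip="10.0.0.1")
print(routes)  # only the 10.0.0.2 route remains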
has2k1/plotnine | plotnine/scales/scales.py | make_scale | def make_scale(ae, series, *args, **kwargs):
"""
Return a proper scale object for the series
The scale is for the aesthetic ae, and args & kwargs
are passed on to the scale creating class
"""
stype = scale_type(series)
# filter parameters by scale type
if stype == 'discrete':
with suppress(KeyError):
del kwargs['trans']
scale_name = 'scale_{}_{}'.format(ae, stype)
scale_klass = Registry[scale_name]
return scale_klass(*args, **kwargs) | python | def make_scale(ae, series, *args, **kwargs):
"""
Return a proper scale object for the series
The scale is for the aesthetic ae, and args & kwargs
are passed on to the scale creating class
"""
stype = scale_type(series)
# filter parameters by scale type
if stype == 'discrete':
with suppress(KeyError):
del kwargs['trans']
scale_name = 'scale_{}_{}'.format(ae, stype)
scale_klass = Registry[scale_name]
return scale_klass(*args, **kwargs) | [
"def",
"make_scale",
"(",
"ae",
",",
"series",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"stype",
"=",
"scale_type",
"(",
"series",
")",
"# filter parameters by scale type",
"if",
"stype",
"==",
"'discrete'",
":",
"with",
"suppress",
"(",
"KeyError",
")",
":",
"del",
"kwargs",
"[",
"'trans'",
"]",
"scale_name",
"=",
"'scale_{}_{}'",
".",
"format",
"(",
"ae",
",",
"stype",
")",
"scale_klass",
"=",
"Registry",
"[",
"scale_name",
"]",
"return",
"scale_klass",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Return a proper scale object for the series
The scale is for the aesthetic ae, and args & kwargs
are passed on to the scale creating class | [
"Return",
"a",
"proper",
"scale",
"object",
"for",
"the",
"series"
] | train | https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/scales/scales.py#L293-L309 | 0.001984 |
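make_scale is a name-based registry dispatch; the shape of that pattern in isolation, with a toy registry standing in for plotnine's:

registry = {
    "scale_x_continuous": lambda **kw: ("continuous x scale", kw),
    "scale_x_discrete": lambda **kw: ("discrete x scale", kw),
}

def make_scale(ae, stype, **kwargs):
    # Discrete scales take no transform, so drop it before dispatch,
    # mirroring the suppress(KeyError) deletion above.
    if stype == "discrete":
        kwargs.pop("trans", None)
    return registry["scale_{}_{}".format(ae, stype)](**kwargs)

print(make_scale("x", "discrete", trans="log10", name="grade"))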
LCAV/pylocus | pylocus/point_set.py | AngleSet.get_convex_polygons | def get_convex_polygons(self, m, print_out=False):
"""
:param m: size of polygons (number of corners)
:return: (ordered) indices of all convex polygons of size m.
"""
convex_polygons = []
for corners in itertools.combinations(np.arange(self.N), m):
p = np.zeros(m, np.uint)
p[0] = corners[0]
left = corners[1:]
# loop through second corners
for i, second in enumerate(corners[1:m - 1]):
p[1] = second
left = np.delete(corners, (0, i + 1))
for j, last in enumerate(corners[i + 2:]):
left = np.delete(corners, (0, i + 1, j + i + 2))
p[-1] = last
# loop through all permutations of left corners.
for permut in itertools.permutations(left):
p[2:-1] = permut
sum_theta = 0
# sum over all inner angles.
for k in range(m):
sum_theta += self.get_inner_angle(
p[1], (p[0], p[2]))
p = np.roll(p, 1)
angle = sum_theta
sum_angle = (m - 2) * pi
if (abs(angle - sum_angle) < 1e-14 or
abs(angle) < 1e-14):
if (print_out):
print("convex polygon found: ", p)
convex_polygons.append(p.copy())
# elif (angle < sum_angle):
# if (print_out): print("non convex polygon found:",p,angle)
elif (angle > sum_angle):
if (print_out):
print("oops")
return convex_polygons | python | def get_convex_polygons(self, m, print_out=False):
"""
:param m: size of polygons (number of corners)
:return: (ordered) indices of all convex polygons of size m.
"""
convex_polygons = []
for corners in itertools.combinations(np.arange(self.N), m):
p = np.zeros(m, np.uint)
p[0] = corners[0]
left = corners[1:]
# loop through second corners
for i, second in enumerate(corners[1:m - 1]):
p[1] = second
left = np.delete(corners, (0, i + 1))
for j, last in enumerate(corners[i + 2:]):
left = np.delete(corners, (0, i + 1, j + i + 2))
p[-1] = last
# loop through all permutations of left corners.
for permut in itertools.permutations(left):
p[2:-1] = permut
sum_theta = 0
# sum over all inner angles.
for k in range(m):
sum_theta += self.get_inner_angle(
p[1], (p[0], p[2]))
p = np.roll(p, 1)
angle = sum_theta
sum_angle = (m - 2) * pi
if (abs(angle - sum_angle) < 1e-14 or
abs(angle) < 1e-14):
if (print_out):
print("convex polygon found: ", p)
convex_polygons.append(p.copy())
# elif (angle < sum_angle):
# if (print_out): print("non convex polygon found:",p,angle)
elif (angle > sum_angle):
if (print_out):
print("oops")
return convex_polygons | [
"def",
"get_convex_polygons",
"(",
"self",
",",
"m",
",",
"print_out",
"=",
"False",
")",
":",
"convex_polygons",
"=",
"[",
"]",
"for",
"corners",
"in",
"itertools",
".",
"combinations",
"(",
"np",
".",
"arange",
"(",
"self",
".",
"N",
")",
",",
"m",
")",
":",
"p",
"=",
"np",
".",
"zeros",
"(",
"m",
",",
"np",
".",
"uint",
")",
"p",
"[",
"0",
"]",
"=",
"corners",
"[",
"0",
"]",
"left",
"=",
"corners",
"[",
"1",
":",
"]",
"# loop through second corners",
"for",
"i",
",",
"second",
"in",
"enumerate",
"(",
"corners",
"[",
"1",
":",
"m",
"-",
"1",
"]",
")",
":",
"p",
"[",
"1",
"]",
"=",
"second",
"left",
"=",
"np",
".",
"delete",
"(",
"corners",
",",
"(",
"0",
",",
"i",
"+",
"1",
")",
")",
"for",
"j",
",",
"last",
"in",
"enumerate",
"(",
"corners",
"[",
"i",
"+",
"2",
":",
"]",
")",
":",
"left",
"=",
"np",
".",
"delete",
"(",
"corners",
",",
"(",
"0",
",",
"i",
"+",
"1",
",",
"j",
"+",
"i",
"+",
"2",
")",
")",
"p",
"[",
"-",
"1",
"]",
"=",
"last",
"# loop through all permutations of left corners.",
"for",
"permut",
"in",
"itertools",
".",
"permutations",
"(",
"left",
")",
":",
"p",
"[",
"2",
":",
"-",
"1",
"]",
"=",
"permut",
"sum_theta",
"=",
"0",
"# sum over all inner angles.",
"for",
"k",
"in",
"range",
"(",
"m",
")",
":",
"sum_theta",
"+=",
"self",
".",
"get_inner_angle",
"(",
"p",
"[",
"1",
"]",
",",
"(",
"p",
"[",
"0",
"]",
",",
"p",
"[",
"2",
"]",
")",
")",
"p",
"=",
"np",
".",
"roll",
"(",
"p",
",",
"1",
")",
"angle",
"=",
"sum_theta",
"sum_angle",
"=",
"(",
"m",
"-",
"2",
")",
"*",
"pi",
"if",
"(",
"abs",
"(",
"angle",
"-",
"sum_angle",
")",
"<",
"1e-14",
"or",
"abs",
"(",
"angle",
")",
"<",
"1e-14",
")",
":",
"if",
"(",
"print_out",
")",
":",
"print",
"(",
"\"convex polygon found: \"",
",",
"p",
")",
"convex_polygons",
".",
"append",
"(",
"p",
".",
"copy",
"(",
")",
")",
"# elif (angle < sum_angle):",
"# if (print_out): print(\"non convex polygon found:\",p,angle)",
"elif",
"(",
"angle",
">",
"sum_angle",
")",
":",
"if",
"(",
"print_out",
")",
":",
"print",
"(",
"\"oops\"",
")",
"return",
"convex_polygons"
] | :param m: size of polygons (number of corners)
:return: (ordered) indices of all convex polygons of size m. | [
":",
"param",
"m",
":",
"size",
"of",
"polygones",
"(",
"number",
"of",
"corners",
")",
":",
"return",
":",
"(",
"ordered",
")",
"indices",
"of",
"all",
"convex",
"polygones",
"of",
"size",
"m",
"."
] | train | https://github.com/LCAV/pylocus/blob/c56a38c251d8a435caf4641a8ae6027ecba2c8c6/pylocus/point_set.py#L467-L506 | 0.002094 |
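The convexity test at the heart of the loop is that the m inner angles of a correctly ordered polygon sum to (m-2)*pi; a quick numpy check on a unit square:

import numpy as np

def inner_angle(corner, a, b):
    u, v = a - corner, b - corner
    cos = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
    return np.arccos(np.clip(cos, -1.0, 1.0))

square = np.array([[0, 0], [1, 0], [1, 1], [0, 1]], dtype=float)
m = len(square)
total = sum(inner_angle(square[i], square[i - 1], square[(i + 1) % m])
            for i in range(m))
print(np.isclose(total, (m - 2) * np.pi))  # True for this convex ordering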
ajenhl/tacl | tacl/sequence.py | SequenceReport._generate_sequences | def _generate_sequences(self, primary_label, secondary_label, ngrams):
"""Generates aligned sequences between each witness labelled
`primary_label` and each witness labelled `secondary_label`,
based around `ngrams`.
:param primary_label: label for one side of the pairs of
witnesses to align
:type primary_label: `str`
:param secondary_label: label for the other side of the pairs
of witnesses to align
:type secondary_label: `str`
:param ngrams: n-grams to base sequences off
:type ngrams: `list` of `str`
"""
cols = [constants.WORK_FIELDNAME, constants.SIGLUM_FIELDNAME]
primary_works = self._matches[self._matches[
constants.LABEL_FIELDNAME] == primary_label][
cols].drop_duplicates()
secondary_works = self._matches[self._matches[
constants.LABEL_FIELDNAME] == secondary_label][
cols].drop_duplicates()
for index, (work1, siglum1) in primary_works.iterrows():
text1 = self._get_text(self._corpus.get_witness(work1, siglum1))
label1 = '{}_{}'.format(work1, siglum1)
for index, (work2, siglum2) in secondary_works.iterrows():
text2 = self._get_text(self._corpus.get_witness(
work2, siglum2))
label2 = '{}_{}'.format(work2, siglum2)
self._generate_sequences_for_texts(label1, text1, label2,
text2, ngrams) | python | def _generate_sequences(self, primary_label, secondary_label, ngrams):
"""Generates aligned sequences between each witness labelled
`primary_label` and each witness labelled `secondary_label`,
based around `ngrams`.
:param primary_label: label for one side of the pairs of
witnesses to align
:type primary_label: `str`
:param secondary_label: label for the other side of the pairs
of witnesses to align
:type secondary_label: `str`
:param ngrams: n-grams to base sequences off
:type ngrams: `list` of `str`
"""
cols = [constants.WORK_FIELDNAME, constants.SIGLUM_FIELDNAME]
primary_works = self._matches[self._matches[
constants.LABEL_FIELDNAME] == primary_label][
cols].drop_duplicates()
secondary_works = self._matches[self._matches[
constants.LABEL_FIELDNAME] == secondary_label][
cols].drop_duplicates()
for index, (work1, siglum1) in primary_works.iterrows():
text1 = self._get_text(self._corpus.get_witness(work1, siglum1))
label1 = '{}_{}'.format(work1, siglum1)
for index, (work2, siglum2) in secondary_works.iterrows():
text2 = self._get_text(self._corpus.get_witness(
work2, siglum2))
label2 = '{}_{}'.format(work2, siglum2)
self._generate_sequences_for_texts(label1, text1, label2,
text2, ngrams) | [
"def",
"_generate_sequences",
"(",
"self",
",",
"primary_label",
",",
"secondary_label",
",",
"ngrams",
")",
":",
"cols",
"=",
"[",
"constants",
".",
"WORK_FIELDNAME",
",",
"constants",
".",
"SIGLUM_FIELDNAME",
"]",
"primary_works",
"=",
"self",
".",
"_matches",
"[",
"self",
".",
"_matches",
"[",
"constants",
".",
"LABEL_FIELDNAME",
"]",
"==",
"primary_label",
"]",
"[",
"cols",
"]",
".",
"drop_duplicates",
"(",
")",
"secondary_works",
"=",
"self",
".",
"_matches",
"[",
"self",
".",
"_matches",
"[",
"constants",
".",
"LABEL_FIELDNAME",
"]",
"==",
"secondary_label",
"]",
"[",
"cols",
"]",
".",
"drop_duplicates",
"(",
")",
"for",
"index",
",",
"(",
"work1",
",",
"siglum1",
")",
"in",
"primary_works",
".",
"iterrows",
"(",
")",
":",
"text1",
"=",
"self",
".",
"_get_text",
"(",
"self",
".",
"_corpus",
".",
"get_witness",
"(",
"work1",
",",
"siglum1",
")",
")",
"label1",
"=",
"'{}_{}'",
".",
"format",
"(",
"work1",
",",
"siglum1",
")",
"for",
"index",
",",
"(",
"work2",
",",
"siglum2",
")",
"in",
"secondary_works",
".",
"iterrows",
"(",
")",
":",
"text2",
"=",
"self",
".",
"_get_text",
"(",
"self",
".",
"_corpus",
".",
"get_witness",
"(",
"work2",
",",
"siglum2",
")",
")",
"label2",
"=",
"'{}_{}'",
".",
"format",
"(",
"work2",
",",
"siglum2",
")",
"self",
".",
"_generate_sequences_for_texts",
"(",
"label1",
",",
"text1",
",",
"label2",
",",
"text2",
",",
"ngrams",
")"
] | Generates aligned sequences between each witness labelled
`primary_label` and each witness labelled `secondary_label`,
based around `ngrams`.
:param primary_label: label for one side of the pairs of
witnesses to align
:type primary_label: `str`
:param secondary_label: label for the other side of the pairs
of witnesses to align
:type secondary_label: `str`
:param ngrams: n-grams to base sequences off
:type ngrams: `list` of `str` | [
"Generates",
"aligned",
"sequences",
"between",
"each",
"witness",
"labelled",
"primary_label",
"and",
"each",
"witness",
"labelled",
"secondary_label",
"based",
"around",
"ngrams",
"."
] | train | https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/sequence.py#L151-L181 | 0.001255 |
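The pairing above leans on a single pandas idiom: filter the match rows by label, project to the identifying columns, and de-duplicate before iterating. A minimal self-contained sketch of that idiom (column names here are stand-ins for tacl's `constants` fieldnames):

```python
import pandas as pd

matches = pd.DataFrame({
    "label": ["A", "A", "B", "A"],
    "work": ["w1", "w1", "w2", "w3"],
    "siglum": ["base", "base", "base", "base"],
})

# Filter by label, keep only the identifying columns, drop duplicates.
primary = matches[matches["label"] == "A"][["work", "siglum"]].drop_duplicates()

for _, (work, siglum) in primary.iterrows():
    print(work, siglum)  # w1 base, then w3 base
```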
cortical-io/retina-sdk.py | retinasdk/full_client.py | FullClient.getImage | def getImage(self, body, imageScalar=2, plotShape="circle", imageEncoding="base64/png", sparsity=1.0):
"""Get images for expressions
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
imageScalar, int: The scale of the image (optional)
plotShape, str: The image shape (optional)
imageEncoding, str: The encoding of the returned image (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
str with the raw byte data of the image
Raises:
CorticalioException: if the request was not successful
"""
return self._image.getImageForExpression(self._retina, body, imageScalar, plotShape, imageEncoding, sparsity) | python | def getImage(self, body, imageScalar=2, plotShape="circle", imageEncoding="base64/png", sparsity=1.0):
"""Get images for expressions
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
imageScalar, int: The scale of the image (optional)
plotShape, str: The image shape (optional)
imageEncoding, str: The encoding of the returned image (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
str with the raw byte data of the image
Raises:
CorticalioException: if the request was not successful
"""
return self._image.getImageForExpression(self._retina, body, imageScalar, plotShape, imageEncoding, sparsity) | [
"def",
"getImage",
"(",
"self",
",",
"body",
",",
"imageScalar",
"=",
"2",
",",
"plotShape",
"=",
"\"circle\"",
",",
"imageEncoding",
"=",
"\"base64/png\"",
",",
"sparsity",
"=",
"1.0",
")",
":",
"return",
"self",
".",
"_image",
".",
"getImageForExpression",
"(",
"self",
".",
"_retina",
",",
"body",
",",
"imageScalar",
",",
"plotShape",
",",
"imageEncoding",
",",
"sparsity",
")"
] | Get images for expressions
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
imageScalar, int: The scale of the image (optional)
plotShape, str: The image shape (optional)
imageEncoding, str: The encoding of the returned image (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
str with the raw byte data of the image
Raises:
CorticalioException: if the request was not successful | [
"Get",
"images",
"for",
"expressions",
"Args",
":",
"body",
"ExpressionOperation",
":",
"The",
"JSON",
"encoded",
"expression",
"to",
"be",
"evaluated",
"(",
"required",
")",
"imageScalar",
"int",
":",
"The",
"scale",
"of",
"the",
"image",
"(",
"optional",
")",
"plotShape",
"str",
":",
"The",
"image",
"shape",
"(",
"optional",
")",
"imageEncoding",
"str",
":",
"The",
"encoding",
"of",
"the",
"returned",
"image",
"(",
"optional",
")",
"sparsity",
"float",
":",
"Sparsify",
"the",
"resulting",
"expression",
"to",
"this",
"percentage",
"(",
"optional",
")",
"Returns",
":",
"str",
"with",
"the",
"raw",
"byte",
"data",
"of",
"the",
"image",
"Raises",
":",
"CorticalioException",
":",
"if",
"the",
"request",
"was",
"not",
"successful"
] | train | https://github.com/cortical-io/retina-sdk.py/blob/474c13ad399fe1e974d2650335537608f4456b07/retinasdk/full_client.py#L285-L298 | 0.007317 |
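Since the default `imageEncoding` is `base64/png`, the string this returns can be decoded with the standard library alone. A sketch of the post-processing step; the `raw` value is a placeholder, not a real API response:

```python
import base64

raw = "iVBORw0KGgoAAAANSUhEUg=="  # placeholder base64 payload
with open("expression.png", "wb") as fh:
    fh.write(base64.b64decode(raw))
```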
kodexlab/reliure | reliure/engine.py | PlayMeta.errors | def errors(self):
""" get all the errors
>>> gres = PlayMeta("operation")
>>> res_plus = BasicPlayMeta(Composable(name="plus"))
>>> gres.append(res_plus)
>>> res_plus.add_error(ValueError("invalid data"))
>>> res_moins = BasicPlayMeta(Composable(name="moins"))
>>> gres.append(res_moins)
>>> res_plus.add_error(RuntimeError("server not anwsering"))
>>> gres.errors
[ValueError('invalid data',), RuntimeError('server not anwsering',)]
"""
errors = []
for meta in self:
errors.extend(meta.errors)
return errors | python | def errors(self):
""" get all the errors
>>> gres = PlayMeta("operation")
>>> res_plus = BasicPlayMeta(Composable(name="plus"))
>>> gres.append(res_plus)
>>> res_plus.add_error(ValueError("invalid data"))
>>> res_moins = BasicPlayMeta(Composable(name="moins"))
>>> gres.append(res_moins)
>>> res_plus.add_error(RuntimeError("server not anwsering"))
>>> gres.errors
[ValueError('invalid data',), RuntimeError('server not anwsering',)]
"""
errors = []
for meta in self:
errors.extend(meta.errors)
return errors | [
"def",
"errors",
"(",
"self",
")",
":",
"errors",
"=",
"[",
"]",
"for",
"meta",
"in",
"self",
":",
"errors",
".",
"extend",
"(",
"meta",
".",
"errors",
")",
"return",
"errors"
] | get all the errors
>>> gres = PlayMeta("operation")
>>> res_plus = BasicPlayMeta(Composable(name="plus"))
>>> gres.append(res_plus)
>>> res_plus.add_error(ValueError("invalid data"))
>>> res_moins = BasicPlayMeta(Composable(name="moins"))
>>> gres.append(res_moins)
>>> res_plus.add_error(RuntimeError("server not anwsering"))
>>> gres.errors
[ValueError('invalid data',), RuntimeError('server not anwsering',)] | [
"get",
"all",
"the",
"errors"
] | train | https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/engine.py#L193-L209 | 0.00316 |
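The property flattens the `errors` lists of all child meta objects, in order. The same aggregation pattern in isolation, with a throwaway container class standing in for `BasicPlayMeta`:

```python
class Node:
    def __init__(self, errors=None):
        self.errors = errors or []

children = [Node([ValueError("invalid data")]), Node(), Node([RuntimeError("timeout")])]

errors = []
for child in children:          # same loop as the property above
    errors.extend(child.errors)

print(errors)  # [ValueError('invalid data'), RuntimeError('timeout')]
```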
happyleavesaoc/python-limitlessled | limitlessled/group/commands/v6.py | CommandSetV6.convert_saturation | def convert_saturation(self, saturation):
"""
Convert the saturation from decimal percent (0.0-1.0)
to byte representation for use in commands.
:param saturation: The saturation in decimal percent (0.0-1.0).
1.0 is the maximum saturation where no white leds will be on. 0.0 is no
saturation.
:return: The saturation in byte representation.
"""
saturation_inverted = 1 - saturation
return math.ceil(saturation_inverted * self.MAX_SATURATION) | python | def convert_saturation(self, saturation):
"""
Convert the saturation from decimal percent (0.0-1.0)
to byte representation for use in commands.
:param saturation: The saturation in decimal percent (0.0-1.0).
1.0 is the maximum saturation where no white leds will be on. 0.0 is no
saturation.
:return: The saturation in byte representation.
"""
saturation_inverted = 1 - saturation
return math.ceil(saturation_inverted * self.MAX_SATURATION) | [
"def",
"convert_saturation",
"(",
"self",
",",
"saturation",
")",
":",
"saturation_inverted",
"=",
"1",
"-",
"saturation",
"return",
"math",
".",
"ceil",
"(",
"saturation_inverted",
"*",
"self",
".",
"MAX_SATURATION",
")"
] | Convert the saturation from decimal percent (0.0-1.0)
to byte representation for use in commands.
:param saturation: The saturation in decimal percent (0.0-1.0).
1.0 is the maximum saturation where no white leds will be on. 0.0 is no
saturation.
:return: The saturation in byte representation. | [
"Convert",
"the",
"saturation",
"from",
"decimal",
"percent",
"(",
"0",
".",
"0",
"-",
"1",
".",
"0",
")",
"to",
"byte",
"representation",
"for",
"use",
"in",
"commands",
".",
":",
"param",
"saturation",
":",
"The",
"saturation",
"from",
"in",
"decimal",
"percent",
"(",
"0",
".",
"0",
"-",
"1",
".",
"0",
")",
".",
"1",
".",
"0",
"is",
"the",
"maximum",
"saturation",
"where",
"no",
"white",
"leds",
"will",
"be",
"on",
".",
"0",
".",
"0",
"is",
"no",
"saturation",
".",
":",
"return",
":",
"The",
"saturation",
"in",
"byte",
"representation",
"."
] | train | https://github.com/happyleavesaoc/python-limitlessled/blob/70307c2bf8c91430a99579d2ad18b228ec7a8488/limitlessled/group/commands/v6.py#L96-L107 | 0.003802 |
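The conversion is an inversion followed by scaling and a ceiling. A standalone version; the value of `MAX_SATURATION` here is assumed, not taken from the library:

```python
import math

MAX_SATURATION = 100  # assumed protocol maximum

def convert_saturation(saturation):
    """Map 0.0-1.0 onto the inverted wire value."""
    return math.ceil((1 - saturation) * MAX_SATURATION)

print(convert_saturation(1.0))  # 0   -> fully saturated, no white LEDs
print(convert_saturation(0.0))  # 100 -> no saturation
```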
resync/resync | resync/sitemap.py | Sitemap.resource_as_xml | def resource_as_xml(self, resource):
"""Return string for the resource as part of an XML sitemap.
Returns a string with the XML snippet representing the resource,
without any XML declaration.
"""
e = self.resource_etree_element(resource)
if (sys.version_info >= (3, 0)):
# python3.x
return(tostring(e, encoding='unicode', method='xml'))
elif (sys.version_info >= (2, 7)):
s = tostring(e, encoding='UTF-8', method='xml')
else:
# must not specify method='xml' in python2.6
s = tostring(e, encoding='UTF-8')
# Chop off XML declaration that is added in 2.x... sigh
return(s.replace("<?xml version='1.0' encoding='UTF-8'?>\n", '')) | python | def resource_as_xml(self, resource):
"""Return string for the resource as part of an XML sitemap.
Returns a string with the XML snippet representing the resource,
without any XML declaration.
"""
e = self.resource_etree_element(resource)
if (sys.version_info >= (3, 0)):
# python3.x
return(tostring(e, encoding='unicode', method='xml'))
elif (sys.version_info >= (2, 7)):
s = tostring(e, encoding='UTF-8', method='xml')
else:
# must not specify method='xml' in python2.6
s = tostring(e, encoding='UTF-8')
# Chop off XML declaration that is added in 2.x... sigh
return(s.replace("<?xml version='1.0' encoding='UTF-8'?>\n", '')) | [
"def",
"resource_as_xml",
"(",
"self",
",",
"resource",
")",
":",
"e",
"=",
"self",
".",
"resource_etree_element",
"(",
"resource",
")",
"if",
"(",
"sys",
".",
"version_info",
">=",
"(",
"3",
",",
"0",
")",
")",
":",
"# python3.x",
"return",
"(",
"tostring",
"(",
"e",
",",
"encoding",
"=",
"'unicode'",
",",
"method",
"=",
"'xml'",
")",
")",
"elif",
"(",
"sys",
".",
"version_info",
">=",
"(",
"2",
",",
"7",
")",
")",
":",
"s",
"=",
"tostring",
"(",
"e",
",",
"encoding",
"=",
"'UTF-8'",
",",
"method",
"=",
"'xml'",
")",
"else",
":",
"# must not specify method='xml' in python2.6",
"s",
"=",
"tostring",
"(",
"e",
",",
"encoding",
"=",
"'UTF-8'",
")",
"# Chop off XML declaration that is added in 2.x... sigh",
"return",
"(",
"s",
".",
"replace",
"(",
"\"<?xml version='1.0' encoding='UTF-8'?>\\n\"",
",",
"''",
")",
")"
] | Return string for the resource as part of an XML sitemap.
Returns a string with the XML snippet representing the resource,
without any XML declaration. | [
"Return",
"string",
"for",
"the",
"resource",
"as",
"part",
"of",
"an",
"XML",
"sitemap",
"."
] | train | https://github.com/resync/resync/blob/98292c17b2c00f2d6f5191c6ab51fef8c292a018/resync/sitemap.py#L281-L297 | 0.005215 |
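The version juggling exists because `xml.etree.ElementTree.tostring` only accepts `encoding='unicode'` (returning `str` directly) on Python 3. On a modern interpreter the whole dance reduces to:

```python
from xml.etree.ElementTree import Element, SubElement, tostring

url = Element("url")
SubElement(url, "loc").text = "http://example.com/res1"

print(tostring(url, encoding="unicode", method="xml"))
# <url><loc>http://example.com/res1</loc></url>
```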
Damgaard/PyImgur | pyimgur/__init__.py | User.get_replies | def get_replies(self, new=True):
"""
Return all reply notifications for this user.
:param new: False for all notifications, True for only non-viewed
notifications.
"""
url = (self._imgur._base_url + "/3/account/{0}/"
"notifications/replies".format(self.name))
return self._imgur._send_request(url, needs_auth=True) | python | def get_replies(self, new=True):
"""
Return all reply notifications for this user.
:param new: False for all notifications, True for only non-viewed
notifications.
"""
url = (self._imgur._base_url + "/3/account/{0}/"
"notifications/replies".format(self.name))
return self._imgur._send_request(url, needs_auth=True) | [
"def",
"get_replies",
"(",
"self",
",",
"new",
"=",
"True",
")",
":",
"url",
"=",
"(",
"self",
".",
"_imgur",
".",
"_base_url",
"+",
"\"/3/account/{0}/\"",
"\"notifications/replies\"",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"return",
"self",
".",
"_imgur",
".",
"_send_request",
"(",
"url",
",",
"needs_auth",
"=",
"True",
")"
] | Return all reply notifications for this user.
:param new: False for all notifications, True for only non-viewed
notifications. | [
"Return",
"all",
"reply",
"notifications",
"for",
"this",
"user",
"."
] | train | https://github.com/Damgaard/PyImgur/blob/606f17078d24158632f807430f8d0b9b3cd8b312/pyimgur/__init__.py#L1375-L1384 | 0.005128 |
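The method is a thin wrapper over a templated endpoint; note that the `new` flag is accepted but never used in the body shown. The URL assembly relies on Python's implicit string-literal concatenation, so `.format` applies to the two adjacent literals joined into one. A sketch with an assumed base URL:

```python
base_url = "https://api.imgur.com"  # assumed value of Imgur._base_url
name = "someuser"

url = (base_url + "/3/account/{0}/"
       "notifications/replies".format(name))
print(url)  # https://api.imgur.com/3/account/someuser/notifications/replies
```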
7sDream/zhihu-py3 | zhihu/answer.py | Answer.content | def content(self):
"""以处理过的Html代码形式返回答案内容.
:return: 答案内容
:rtype: str
"""
answer_wrap = self.soup.find('div', id='zh-question-answer-wrap')
content = answer_wrap.find('div', class_='zm-editable-content')
content = answer_content_process(content)
return content | python | def content(self):
"""以处理过的Html代码形式返回答案内容.
:return: 答案内容
:rtype: str
"""
answer_wrap = self.soup.find('div', id='zh-question-answer-wrap')
content = answer_wrap.find('div', class_='zm-editable-content')
content = answer_content_process(content)
return content | [
"def",
"content",
"(",
"self",
")",
":",
"answer_wrap",
"=",
"self",
".",
"soup",
".",
"find",
"(",
"'div'",
",",
"id",
"=",
"'zh-question-answer-wrap'",
")",
"content",
"=",
"answer_wrap",
".",
"find",
"(",
"'div'",
",",
"class_",
"=",
"'zm-editable-content'",
")",
"content",
"=",
"answer_content_process",
"(",
"content",
")",
"return",
"content"
] | Return the answer content as processed HTML.
:return: answer content
:rtype: str | [
"Return",
"the",
"answer",
"content",
"as",
"processed",
"HTML",
"."
] | train | https://github.com/7sDream/zhihu-py3/blob/bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc/zhihu/answer.py#L145-L154 | 0.006173 |
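The extraction is a two-step BeautifulSoup lookup: an `id` match, then a `class` match inside it. The same pattern against a tiny inline document:

```python
from bs4 import BeautifulSoup

html = ('<div id="zh-question-answer-wrap">'
        '<div class="zm-editable-content">Hello</div></div>')
soup = BeautifulSoup(html, "html.parser")

wrap = soup.find("div", id="zh-question-answer-wrap")
content = wrap.find("div", class_="zm-editable-content")
print(content.get_text())  # Hello
```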
MrYsLab/pymata-aio | examples/sparkfun_redbot/pixy/pan_and_tilt_config_1_demo_simple.py | pixy_value_update | def pixy_value_update(current_pan_angle_deg, prev_pan_move_deg, blocks):
""" Prints the Pixy blocks data."""
if len(blocks) > 0:
pan_error = X_CENTER - blocks[0]["x"]
if math.fabs(pan_error) < 20.0:
print("Close enough.")
return current_pan_angle_deg, 0
pan_move_deg = 1 if pan_error > 0 else -1
if prev_pan_move_deg > 0 and pan_move_deg > 0:
pan_move_deg = 3
if prev_pan_move_deg < 0 and pan_move_deg < 0:
pan_move_deg = -3
current_pan_angle_deg += pan_move_deg
if current_pan_angle_deg > 150:
current_pan_angle_deg = 150
if current_pan_angle_deg < 20:
current_pan_angle_deg = 20
print("x: {} pan_error: {} pan_move_deg: {} angle: {}".format(blocks[0]["x"], pan_error, pan_move_deg, current_pan_angle_deg))
return current_pan_angle_deg, pan_move_deg
return current_pan_angle_deg, 0 | python | def pixy_value_update(current_pan_angle_deg, prev_pan_move_deg, blocks):
""" Prints the Pixy blocks data."""
if len(blocks) > 0:
pan_error = X_CENTER - blocks[0]["x"]
if math.fabs(pan_error) < 20.0:
print("Close enough.")
return current_pan_angle_deg, 0
pan_move_deg = 1 if pan_error > 0 else -1
if prev_pan_move_deg > 0 and pan_move_deg > 0:
pan_move_deg = 3
if prev_pan_move_deg < 0 and pan_move_deg < 0:
pan_move_deg = -3
current_pan_angle_deg += pan_move_deg
if current_pan_angle_deg > 150:
current_pan_angle_deg = 150
if current_pan_angle_deg < 20:
current_pan_angle_deg = 20
print("x: {} pan_error: {} pan_move_deg: {} angle: {}".format(blocks[0]["x"], pan_error, pan_move_deg, current_pan_angle_deg))
return current_pan_angle_deg, pan_move_deg
return current_pan_angle_deg, 0 | [
"def",
"pixy_value_update",
"(",
"current_pan_angle_deg",
",",
"prev_pan_move_deg",
",",
"blocks",
")",
":",
"if",
"len",
"(",
"blocks",
")",
">",
"0",
":",
"pan_error",
"=",
"X_CENTER",
"-",
"blocks",
"[",
"0",
"]",
"[",
"\"x\"",
"]",
"if",
"math",
".",
"fabs",
"(",
"pan_error",
")",
"<",
"20.0",
":",
"print",
"(",
"\"Close enough.\"",
")",
"return",
"current_pan_angle_deg",
",",
"0",
"pan_move_deg",
"=",
"1",
"if",
"pan_error",
">",
"0",
"else",
"-",
"1",
"if",
"prev_pan_move_deg",
">",
"0",
"and",
"pan_move_deg",
">",
"0",
":",
"pan_move_deg",
"=",
"3",
"if",
"prev_pan_move_deg",
"<",
"0",
"and",
"pan_move_deg",
"<",
"0",
":",
"pan_move_deg",
"=",
"-",
"3",
"current_pan_angle_deg",
"+=",
"pan_move_deg",
"if",
"current_pan_angle_deg",
">",
"150",
":",
"current_pan_angle_deg",
"=",
"150",
"if",
"current_pan_angle_deg",
"<",
"20",
":",
"current_pan_angle_deg",
"=",
"20",
"print",
"(",
"\"x: {} pan_error: {} pan_move_deg: {} angle: {}\"",
".",
"format",
"(",
"blocks",
"[",
"0",
"]",
"[",
"\"x\"",
"]",
",",
"pan_error",
",",
"pan_move_deg",
",",
"current_pan_angle_deg",
")",
")",
"return",
"current_pan_angle_deg",
",",
"pan_move_deg",
"return",
"current_pan_angle_deg",
",",
"0"
] | Prints the Pixy blocks data. | [
"Prints",
"the",
"Pixy",
"blocks",
"data",
"."
] | train | https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/examples/sparkfun_redbot/pixy/pan_and_tilt_config_1_demo_simple.py#L51-L70 | 0.002107 |
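Stripped of the Pixy specifics, this is a clamped bang-bang controller that triples the step when two consecutive moves agree on direction. The core update in isolation (`X_CENTER` assumed to be half of a 320-pixel-wide frame):

```python
X_CENTER = 160  # assumed frame centre

def pan_step(error, prev_step):
    step = 1 if error > 0 else -1
    if prev_step > 0 and step > 0:   # momentum boost when direction repeats
        step = 3
    if prev_step < 0 and step < 0:
        step = -3
    return step

print(pan_step(X_CENTER - 100, 0))  # 1
print(pan_step(X_CENTER - 100, 1))  # 3
```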
zimeon/iiif | iiif/request.py | IIIFRequest.url | def url(self, **params):
"""Build a URL path for image or info request.
An IIIF Image request with parameterized form is assumed unless
the info parameter is specified, in which case an Image Information
request URI is constructred.
"""
self._setattrs(**params)
path = self.baseurl + self.quote(self.identifier) + "/"
if (self.info):
# info request
path += "info"
format = self.format if self.format else "json"
else:
# region
if self.region:
region = self.region
elif self.region_xywh:
region = "%d,%d,%d,%d" % tuple(self.region_xywh)
else:
region = "full"
# size
if self.size:
size = self.size
elif self.size_wh:
if (self.size_wh[0] is None):
size = ",%d" % (self.size_wh[1])
elif (self.size_wh[1] is None):
size = "%d," % (self.size_wh[0])
else:
size = "%d,%d" % (self.size_wh[0], self.size_wh[1])
elif (self.size_max and self.api_version >= '2.1'):
size = 'max'
else:
size = "full"
# rotation and quality
rotation = self.rotation if self.rotation else "0"
quality = self.quality if self.quality else self.default_quality
# parameterized form
path += self.quote(region) + "/" +\
self.quote(size) + "/" +\
self.quote(rotation) + "/" +\
self.quote(quality)
format = self.format
if (format):
path += "." + format
return(path) | python | def url(self, **params):
"""Build a URL path for image or info request.
An IIIF Image request with parameterized form is assumed unless
the info parameter is specified, in which case an Image Information
request URI is constructed.
"""
self._setattrs(**params)
path = self.baseurl + self.quote(self.identifier) + "/"
if (self.info):
# info request
path += "info"
format = self.format if self.format else "json"
else:
# region
if self.region:
region = self.region
elif self.region_xywh:
region = "%d,%d,%d,%d" % tuple(self.region_xywh)
else:
region = "full"
# size
if self.size:
size = self.size
elif self.size_wh:
if (self.size_wh[0] is None):
size = ",%d" % (self.size_wh[1])
elif (self.size_wh[1] is None):
size = "%d," % (self.size_wh[0])
else:
size = "%d,%d" % (self.size_wh[0], self.size_wh[1])
elif (self.size_max and self.api_version >= '2.1'):
size = 'max'
else:
size = "full"
# rotation and quality
rotation = self.rotation if self.rotation else "0"
quality = self.quality if self.quality else self.default_quality
# parameterized form
path += self.quote(region) + "/" +\
self.quote(size) + "/" +\
self.quote(rotation) + "/" +\
self.quote(quality)
format = self.format
if (format):
path += "." + format
return(path) | [
"def",
"url",
"(",
"self",
",",
"*",
"*",
"params",
")",
":",
"self",
".",
"_setattrs",
"(",
"*",
"*",
"params",
")",
"path",
"=",
"self",
".",
"baseurl",
"+",
"self",
".",
"quote",
"(",
"self",
".",
"identifier",
")",
"+",
"\"/\"",
"if",
"(",
"self",
".",
"info",
")",
":",
"# info request",
"path",
"+=",
"\"info\"",
"format",
"=",
"self",
".",
"format",
"if",
"self",
".",
"format",
"else",
"\"json\"",
"else",
":",
"# region",
"if",
"self",
".",
"region",
":",
"region",
"=",
"self",
".",
"region",
"elif",
"self",
".",
"region_xywh",
":",
"region",
"=",
"\"%d,%d,%d,%d\"",
"%",
"tuple",
"(",
"self",
".",
"region_xywh",
")",
"else",
":",
"region",
"=",
"\"full\"",
"# size",
"if",
"self",
".",
"size",
":",
"size",
"=",
"self",
".",
"size",
"elif",
"self",
".",
"size_wh",
":",
"if",
"(",
"self",
".",
"size_wh",
"[",
"0",
"]",
"is",
"None",
")",
":",
"size",
"=",
"\",%d\"",
"%",
"(",
"self",
".",
"size_wh",
"[",
"1",
"]",
")",
"elif",
"(",
"self",
".",
"size_wh",
"[",
"1",
"]",
"is",
"None",
")",
":",
"size",
"=",
"\"%d,\"",
"%",
"(",
"self",
".",
"size_wh",
"[",
"0",
"]",
")",
"else",
":",
"size",
"=",
"\"%d,%d\"",
"%",
"(",
"self",
".",
"size_wh",
"[",
"0",
"]",
",",
"self",
".",
"size_wh",
"[",
"1",
"]",
")",
"elif",
"(",
"self",
".",
"size_max",
"and",
"self",
".",
"api_version",
">=",
"'2.1'",
")",
":",
"size",
"=",
"'max'",
"else",
":",
"size",
"=",
"\"full\"",
"# rotation and quality",
"rotation",
"=",
"self",
".",
"rotation",
"if",
"self",
".",
"rotation",
"else",
"\"0\"",
"quality",
"=",
"self",
".",
"quality",
"if",
"self",
".",
"quality",
"else",
"self",
".",
"default_quality",
"# parameterized form",
"path",
"+=",
"self",
".",
"quote",
"(",
"region",
")",
"+",
"\"/\"",
"+",
"self",
".",
"quote",
"(",
"size",
")",
"+",
"\"/\"",
"+",
"self",
".",
"quote",
"(",
"rotation",
")",
"+",
"\"/\"",
"+",
"self",
".",
"quote",
"(",
"quality",
")",
"format",
"=",
"self",
".",
"format",
"if",
"(",
"format",
")",
":",
"path",
"+=",
"\".\"",
"+",
"format",
"return",
"(",
"path",
")"
] | Build a URL path for image or info request.
An IIIF Image request with parameterized form is assumed unless
the info parameter is specified, in which case an Image Information
request URI is constructed. | [
"Build",
"a",
"URL",
"path",
"for",
"image",
"or",
"info",
"request",
"."
] | train | https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/request.py#L148-L194 | 0.001672 |
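The parameterized branch follows the IIIF Image API path template `{identifier}/{region}/{size}/{rotation}/{quality}.{format}`. Filling the pre-2.1 defaults by hand (the base URL is a placeholder):

```python
baseurl = "http://example.org/iiif/"  # placeholder service prefix
identifier = "abcd1234"

path = "{}{}/{}/{}/{}/{}.{}".format(
    baseurl, identifier, "full", "full", "0", "default", "jpg")
print(path)  # http://example.org/iiif/abcd1234/full/full/0/default.jpg
```

Quality `default` matches Image API 2.x; version 1.1 used `native`.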
nion-software/nionswift-io | nionswift_plugin/TIFF_IO/tifffile.py | read_utf8 | def read_utf8(fh, byteorder, dtype, count, offsetsize):
"""Read tag data from file and return as unicode string."""
return fh.read(count).decode('utf-8') | python | def read_utf8(fh, byteorder, dtype, count, offsetsize):
"""Read tag data from file and return as unicode string."""
return fh.read(count).decode('utf-8') | [
"def",
"read_utf8",
"(",
"fh",
",",
"byteorder",
",",
"dtype",
",",
"count",
",",
"offsetsize",
")",
":",
"return",
"fh",
".",
"read",
"(",
"count",
")",
".",
"decode",
"(",
"'utf-8'",
")"
] | Read tag data from file and return as unicode string. | [
"Read",
"tag",
"data",
"from",
"file",
"and",
"return",
"as",
"unicode",
"string",
"."
] | train | https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8123-L8125 | 0.006211 |
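Only `fh` and `count` matter here; the other parameters exist to match the shared tag-reader signature. Exercising the same read-and-decode against an in-memory buffer:

```python
import io

fh = io.BytesIO("café".encode("utf-8"))  # 5 bytes: b'caf\xc3\xa9'
print(fh.read(5).decode("utf-8"))        # café
```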
toumorokoshi/sprinter | sprinter/next/environment/injections.py | Injections.inject_content | def inject_content(self, content, inject_string):
"""
Inject inject_string into a text buffer, wrapped with
#{{ wrapper }} comments if condition lambda is not
satisfied or is None. Remove old instances of injects if they
exist.
"""
inject_string = _unicode(inject_string)
content = self.wrapper_match.sub("", _unicode(content))
if self.override_match:
sprinter_overrides = self.override_match.search(content)
if sprinter_overrides:
content = self.override_match.sub("", content)
sprinter_overrides = sprinter_overrides.groups()[0]
else:
sprinter_overrides = ""
content += """
%s
%s
%s
""" % (self.wrapper, inject_string.rstrip(), self.wrapper)
if self.override_match:
content += sprinter_overrides.rstrip() + "\n"
return content | python | def inject_content(self, content, inject_string):
"""
Inject inject_string into a text buffer, wrapped with
#{{ wrapper }} comments if condition lambda is not
satisfied or is None. Remove old instances of injects if they
exist.
"""
inject_string = _unicode(inject_string)
content = self.wrapper_match.sub("", _unicode(content))
if self.override_match:
sprinter_overrides = self.override_match.search(content)
if sprinter_overrides:
content = self.override_match.sub("", content)
sprinter_overrides = sprinter_overrides.groups()[0]
else:
sprinter_overrides = ""
content += """
%s
%s
%s
""" % (self.wrapper, inject_string.rstrip(), self.wrapper)
if self.override_match:
content += sprinter_overrides.rstrip() + "\n"
return content | [
"def",
"inject_content",
"(",
"self",
",",
"content",
",",
"inject_string",
")",
":",
"inject_string",
"=",
"_unicode",
"(",
"inject_string",
")",
"content",
"=",
"self",
".",
"wrapper_match",
".",
"sub",
"(",
"\"\"",
",",
"_unicode",
"(",
"content",
")",
")",
"if",
"self",
".",
"override_match",
":",
"sprinter_overrides",
"=",
"self",
".",
"override_match",
".",
"search",
"(",
"content",
")",
"if",
"sprinter_overrides",
":",
"content",
"=",
"self",
".",
"override_match",
".",
"sub",
"(",
"\"\"",
",",
"content",
")",
"sprinter_overrides",
"=",
"sprinter_overrides",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"else",
":",
"sprinter_overrides",
"=",
"\"\"",
"content",
"+=",
"\"\"\"\n%s\n%s\n%s\n\"\"\"",
"%",
"(",
"self",
".",
"wrapper",
",",
"inject_string",
".",
"rstrip",
"(",
")",
",",
"self",
".",
"wrapper",
")",
"if",
"self",
".",
"override_match",
":",
"content",
"+=",
"sprinter_overrides",
".",
"rstrip",
"(",
")",
"+",
"\"\\n\"",
"return",
"content"
] | Inject inject_string into a text buffer, wrapped with
#{{ wrapper }} comments if condition lambda is not
satisfied or is None. Remove old instances of injects if they
exist. | [
"Inject",
"inject_string",
"into",
"a",
"text",
"buffer",
"wrapped",
"with",
"#",
"{{",
"wrapper",
"}}",
"comments",
"if",
"condition",
"lambda",
"is",
"not",
"satisfied",
"or",
"is",
"None",
".",
"Remove",
"old",
"instances",
"of",
"injects",
"if",
"they",
"exist",
"."
] | train | https://github.com/toumorokoshi/sprinter/blob/846697a7a087e69c61d075232e754d6975a64152/sprinter/next/environment/injections.py#L129-L152 | 0.002174 |
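The mechanics depend on two precompiled patterns on the instance: `wrapper_match` strips any previously injected block, and `override_match` splits off user overrides so they can be re-appended last. A reduced sketch of the remove-then-append cycle using only `re` (the wrapper text is made up):

```python
import re

WRAPPER = "#SPRINTER"
wrapper_match = re.compile(
    r"\n?%s\n.*?%s\n" % (re.escape(WRAPPER), re.escape(WRAPPER)), re.DOTALL)

def inject(content, snippet):
    content = wrapper_match.sub("", content)  # drop any stale block first
    return content + "\n%s\n%s\n%s\n" % (WRAPPER, snippet.rstrip(), WRAPPER)

once = inject("export PATH=$PATH\n", "alias ll='ls -l'")
twice = inject(once, "alias ll='ls -la'")
assert twice.count(WRAPPER) == 2  # re-injection replaced, not duplicated
```

This is why injection is idempotent: running it twice leaves exactly one wrapped block.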
glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/plot/plotwidget.py | PlotWidget.mesh | def mesh(self, vertices=None, faces=None, vertex_colors=None,
face_colors=None, color=(0.5, 0.5, 1.), fname=None,
meshdata=None):
"""Show a 3D mesh
Parameters
----------
vertices : array
Vertices.
faces : array | None
Face definitions.
vertex_colors : array | None
Vertex colors.
face_colors : array | None
Face colors.
color : instance of Color
Color to use.
fname : str | None
Filename to load. If not None, then vertices, faces, and meshdata
must be None.
meshdata : MeshData | None
Meshdata to use. If not None, then vertices, faces, and fname
must be None.
Returns
-------
mesh : instance of Mesh
The mesh.
"""
self._configure_3d()
if fname is not None:
if not all(x is None for x in (vertices, faces, meshdata)):
raise ValueError('vertices, faces, and meshdata must be None '
'if fname is not None')
vertices, faces = read_mesh(fname)[:2]
if meshdata is not None:
if not all(x is None for x in (vertices, faces, fname)):
raise ValueError('vertices, faces, and fname must be None if '
'meshdata is not None')
else:
meshdata = MeshData(vertices, faces)
mesh = scene.Mesh(meshdata=meshdata, vertex_colors=vertex_colors,
face_colors=face_colors, color=color,
shading='smooth')
self.view.add(mesh)
self.view.camera.set_range()
return mesh | python | def mesh(self, vertices=None, faces=None, vertex_colors=None,
face_colors=None, color=(0.5, 0.5, 1.), fname=None,
meshdata=None):
"""Show a 3D mesh
Parameters
----------
vertices : array
Vertices.
faces : array | None
Face definitions.
vertex_colors : array | None
Vertex colors.
face_colors : array | None
Face colors.
color : instance of Color
Color to use.
fname : str | None
Filename to load. If not None, then vertices, faces, and meshdata
must be None.
meshdata : MeshData | None
Meshdata to use. If not None, then vertices, faces, and fname
must be None.
Returns
-------
mesh : instance of Mesh
The mesh.
"""
self._configure_3d()
if fname is not None:
if not all(x is None for x in (vertices, faces, meshdata)):
raise ValueError('vertices, faces, and meshdata must be None '
'if fname is not None')
vertices, faces = read_mesh(fname)[:2]
if meshdata is not None:
if not all(x is None for x in (vertices, faces, fname)):
raise ValueError('vertices, faces, and fname must be None if '
'fname is not None')
else:
meshdata = MeshData(vertices, faces)
mesh = scene.Mesh(meshdata=meshdata, vertex_colors=vertex_colors,
face_colors=face_colors, color=color,
shading='smooth')
self.view.add(mesh)
self.view.camera.set_range()
return mesh | [
"def",
"mesh",
"(",
"self",
",",
"vertices",
"=",
"None",
",",
"faces",
"=",
"None",
",",
"vertex_colors",
"=",
"None",
",",
"face_colors",
"=",
"None",
",",
"color",
"=",
"(",
"0.5",
",",
"0.5",
",",
"1.",
")",
",",
"fname",
"=",
"None",
",",
"meshdata",
"=",
"None",
")",
":",
"self",
".",
"_configure_3d",
"(",
")",
"if",
"fname",
"is",
"not",
"None",
":",
"if",
"not",
"all",
"(",
"x",
"is",
"None",
"for",
"x",
"in",
"(",
"vertices",
",",
"faces",
",",
"meshdata",
")",
")",
":",
"raise",
"ValueError",
"(",
"'vertices, faces, and meshdata must be None '",
"'if fname is not None'",
")",
"vertices",
",",
"faces",
"=",
"read_mesh",
"(",
"fname",
")",
"[",
":",
"2",
"]",
"if",
"meshdata",
"is",
"not",
"None",
":",
"if",
"not",
"all",
"(",
"x",
"is",
"None",
"for",
"x",
"in",
"(",
"vertices",
",",
"faces",
",",
"fname",
")",
")",
":",
"raise",
"ValueError",
"(",
"'vertices, faces, and fname must be None if '",
"'fname is not None'",
")",
"else",
":",
"meshdata",
"=",
"MeshData",
"(",
"vertices",
",",
"faces",
")",
"mesh",
"=",
"scene",
".",
"Mesh",
"(",
"meshdata",
"=",
"meshdata",
",",
"vertex_colors",
"=",
"vertex_colors",
",",
"face_colors",
"=",
"face_colors",
",",
"color",
"=",
"color",
",",
"shading",
"=",
"'smooth'",
")",
"self",
".",
"view",
".",
"add",
"(",
"mesh",
")",
"self",
".",
"view",
".",
"camera",
".",
"set_range",
"(",
")",
"return",
"mesh"
] | Show a 3D mesh
Parameters
----------
vertices : array
Vertices.
faces : array | None
Face definitions.
vertex_colors : array | None
Vertex colors.
face_colors : array | None
Face colors.
color : instance of Color
Color to use.
fname : str | None
Filename to load. If not None, then vertices, faces, and meshdata
must be None.
meshdata : MeshData | None
Meshdata to use. If not None, then vertices, faces, and fname
must be None.
Returns
-------
mesh : instance of Mesh
The mesh. | [
"Show",
"a",
"3D",
"mesh"
] | train | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/plot/plotwidget.py#L216-L262 | 0.002275 |
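In normal use this method hangs off a figure cell from `vispy.plot`. A hedged usage sketch: the tetrahedron data is made up, and the import path assumes upstream vispy rather than this bundled copy:

```python
import numpy as np
import vispy.plot as vp

vertices = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float32)
faces = np.array([[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]])

fig = vp.Fig()
fig[0, 0].mesh(vertices=vertices, faces=faces, color=(0.5, 0.5, 1.0))
fig.app.run()
```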
brainiak/brainiak | brainiak/factoranalysis/htfa.py | HTFA._update_global_posterior | def _update_global_posterior(
self, rank, m, outer_converged):
"""Update global posterior and then check convergence
Parameters
----------
rank : integer
The rank of current process.
m : integer
The outer iteration number of HTFA.
outer_converged : 1D array
Record whether HTFA loop converged
Returns
-------
1D array, contains only 1 element for MPI
1 means HTFA converged, 0 means not converged.
"""
if rank == 0:
self._map_update_posterior()
self._assign_posterior()
is_converged, _ = self._converged()
if is_converged:
logger.info("converged at %d outer iter" % (m))
outer_converged[0] = 1
else:
self.global_prior_ = self.global_posterior_
return outer_converged | python | def _update_global_posterior(
self, rank, m, outer_converged):
"""Update global posterior and then check convergence
Parameters
----------
rank : integer
The rank of current process.
m : integer
The outer iteration number of HTFA.
outer_converged : 1D array
Record whether HTFA loop converged
Returns
-------
1D array, contains only 1 element for MPI
1 means HTFA converged, 0 means not converged.
"""
if rank == 0:
self._map_update_posterior()
self._assign_posterior()
is_converged, _ = self._converged()
if is_converged:
logger.info("converged at %d outer iter" % (m))
outer_converged[0] = 1
else:
self.global_prior_ = self.global_posterior_
return outer_converged | [
"def",
"_update_global_posterior",
"(",
"self",
",",
"rank",
",",
"m",
",",
"outer_converged",
")",
":",
"if",
"rank",
"==",
"0",
":",
"self",
".",
"_map_update_posterior",
"(",
")",
"self",
".",
"_assign_posterior",
"(",
")",
"is_converged",
",",
"_",
"=",
"self",
".",
"_converged",
"(",
")",
"if",
"is_converged",
":",
"logger",
".",
"info",
"(",
"\"converged at %d outer iter\"",
"%",
"(",
"m",
")",
")",
"outer_converged",
"[",
"0",
"]",
"=",
"1",
"else",
":",
"self",
".",
"global_prior_",
"=",
"self",
".",
"global_posterior_",
"return",
"outer_converged"
] | Update global posterior and then check convergence
Parameters
----------
rank : integer
The rank of current process.
m : integer
The outer iteration number of HTFA.
outer_converged : 1D array
Record whether HTFA loop converged
Returns
-------
1D array, contains only 1 element for MPI
1 means HTFA converged, 0 means not converged. | [
"Update",
"global",
"posterior",
"and",
"then",
"check",
"convergence"
] | train | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/htfa.py#L592-L624 | 0.002134 |
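The rank gate is a standard MPI pattern: only root updates and tests the shared state, then the one-element `outer_converged` array is broadcast (elsewhere in HTFA) so every rank stops together. A stub of that control flow with mpi4py; the convergence test is a stand-in:

```python
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
outer_converged = np.zeros(1, dtype=np.int32)

for m in range(10):                      # outer loop
    # ... per-rank local updates would happen here ...
    if comm.Get_rank() == 0:
        if m >= 3:                       # stand-in for the real test
            outer_converged[0] = 1
    comm.Bcast(outer_converged, root=0)  # every rank sees root's decision
    if outer_converged[0]:
        break
```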
unt-libraries/pyuntl | pyuntl/highwire_structure.py | CitationPublisher.get_publisher_name | def get_publisher_name(self, **kwargs):
"""Get the publisher name."""
children = kwargs.get('children', [])
# Find the publisher name in children.
for child in children:
if child.tag == 'name':
return child.content
return None | python | def get_publisher_name(self, **kwargs):
"""Get the publisher name."""
children = kwargs.get('children', [])
# Find the publisher name in children.
for child in children:
if child.tag == 'name':
return child.content
return None | [
"def",
"get_publisher_name",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"children",
"=",
"kwargs",
".",
"get",
"(",
"'children'",
",",
"[",
"]",
")",
"# Find the creator type in children.",
"for",
"child",
"in",
"children",
":",
"if",
"child",
".",
"tag",
"==",
"'name'",
":",
"return",
"child",
".",
"content",
"return",
"None"
] | Get the publisher name. | [
"Get",
"the",
"publisher",
"name",
"."
] | train | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/highwire_structure.py#L77-L84 | 0.006849 |
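The lookup is a first-match scan over child elements. The same idiom with a generator expression and throwaway objects:

```python
from collections import namedtuple

Child = namedtuple("Child", ["tag", "content"])
children = [Child("info", "x"), Child("name", "UNT Libraries")]

name = next((c.content for c in children if c.tag == "name"), None)
print(name)  # UNT Libraries
```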
santoshphilip/eppy | eppy/modeleditor.py | rename | def rename(idf, objkey, objname, newname):
"""rename all the refrences to this objname"""
refnames = getrefnames(idf, objkey)
for refname in refnames:
objlists = getallobjlists(idf, refname)
# [('OBJKEY', refname, fieldindexlist), ...]
for refname in refnames:
# TODO : there seems to be a duplication in this loop. Check.
# refname appears in both loops
for robjkey, refname, fieldindexlist in objlists:
idfobjects = idf.idfobjects[robjkey]
for idfobject in idfobjects:
for findex in fieldindexlist: # for each field
if idfobject[idfobject.objls[findex]] == objname:
idfobject[idfobject.objls[findex]] = newname
theobject = idf.getobject(objkey, objname)
fieldname = [item for item in theobject.objls if item.endswith('Name')][0]
theobject[fieldname] = newname
return theobject | python | def rename(idf, objkey, objname, newname):
"""rename all the refrences to this objname"""
refnames = getrefnames(idf, objkey)
for refname in refnames:
objlists = getallobjlists(idf, refname)
# [('OBJKEY', refname, fieldindexlist), ...]
for refname in refnames:
# TODO : there seems to be a duplication in this loop. Check.
# refname appears in both loops
for robjkey, refname, fieldindexlist in objlists:
idfobjects = idf.idfobjects[robjkey]
for idfobject in idfobjects:
for findex in fieldindexlist: # for each field
if idfobject[idfobject.objls[findex]] == objname:
idfobject[idfobject.objls[findex]] = newname
theobject = idf.getobject(objkey, objname)
fieldname = [item for item in theobject.objls if item.endswith('Name')][0]
theobject[fieldname] = newname
return theobject | [
"def",
"rename",
"(",
"idf",
",",
"objkey",
",",
"objname",
",",
"newname",
")",
":",
"refnames",
"=",
"getrefnames",
"(",
"idf",
",",
"objkey",
")",
"for",
"refname",
"in",
"refnames",
":",
"objlists",
"=",
"getallobjlists",
"(",
"idf",
",",
"refname",
")",
"# [('OBJKEY', refname, fieldindexlist), ...]",
"for",
"refname",
"in",
"refnames",
":",
"# TODO : there seems to be a duplication in this loop. Check.",
"# refname appears in both loops",
"for",
"robjkey",
",",
"refname",
",",
"fieldindexlist",
"in",
"objlists",
":",
"idfobjects",
"=",
"idf",
".",
"idfobjects",
"[",
"robjkey",
"]",
"for",
"idfobject",
"in",
"idfobjects",
":",
"for",
"findex",
"in",
"fieldindexlist",
":",
"# for each field",
"if",
"idfobject",
"[",
"idfobject",
".",
"objls",
"[",
"findex",
"]",
"]",
"==",
"objname",
":",
"idfobject",
"[",
"idfobject",
".",
"objls",
"[",
"findex",
"]",
"]",
"=",
"newname",
"theobject",
"=",
"idf",
".",
"getobject",
"(",
"objkey",
",",
"objname",
")",
"fieldname",
"=",
"[",
"item",
"for",
"item",
"in",
"theobject",
".",
"objls",
"if",
"item",
".",
"endswith",
"(",
"'Name'",
")",
"]",
"[",
"0",
"]",
"theobject",
"[",
"fieldname",
"]",
"=",
"newname",
"return",
"theobject"
] | rename all the references to this objname | [
"rename",
"all",
"the",
"refrences",
"to",
"this",
"objname"
] | train | https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L371-L389 | 0.003115 |
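A hedged usage sketch: rename a material and let every object that references it follow. The file paths are placeholders, and `IDF.setiddname` must point at a real Energy+ IDD before any model is opened:

```python
from eppy import modeleditor
from eppy.modeleditor import IDF

IDF.setiddname("/path/to/Energy+.idd")  # placeholder path
idf = IDF("/path/to/model.idf")         # placeholder path

modeleditor.rename(idf, "MATERIAL", "Old Wall Material", "New Wall Material")
idf.save()
```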
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_openstack_helper.py | DfaNeutronHelper.get_fw_policy | def get_fw_policy(self, policy_id):
"""Return the firewall policy, given its ID. """
policy = None
try:
policy = self.neutronclient.show_firewall_policy(policy_id)
except Exception as exc:
LOG.error("Failed to get firewall plcy for id %(id)s "
"Exc %(exc)s",
{'id': policy_id, 'exc': str(exc)})
return policy | python | def get_fw_policy(self, policy_id):
"""Return the firewall policy, given its ID. """
policy = None
try:
policy = self.neutronclient.show_firewall_policy(policy_id)
except Exception as exc:
LOG.error("Failed to get firewall plcy for id %(id)s "
"Exc %(exc)s",
{'id': policy_id, 'exc': str(exc)})
return policy | [
"def",
"get_fw_policy",
"(",
"self",
",",
"policy_id",
")",
":",
"policy",
"=",
"None",
"try",
":",
"policy",
"=",
"self",
".",
"neutronclient",
".",
"show_firewall_policy",
"(",
"policy_id",
")",
"except",
"Exception",
"as",
"exc",
":",
"LOG",
".",
"error",
"(",
"\"Failed to get firewall plcy for id %(id)s \"",
"\"Exc %(exc)s\"",
",",
"{",
"'id'",
":",
"policy_id",
",",
"'exc'",
":",
"str",
"(",
"exc",
")",
"}",
")",
"return",
"policy"
] | Return the firewall policy, given its ID. | [
"Return",
"the",
"firewall",
"policy",
"given",
"its",
"ID",
"."
] | train | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L524-L533 | 0.004808 |
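The method wraps the neutron client call in a swallow-and-log guard so callers can test for `None` instead of handling exceptions. The shape of the pattern with a stub client:

```python
import logging

LOG = logging.getLogger(__name__)

class StubClient:
    def show_firewall_policy(self, policy_id):
        raise RuntimeError("neutron unreachable")

def get_fw_policy(client, policy_id):
    try:
        return client.show_firewall_policy(policy_id)
    except Exception as exc:
        LOG.error("Failed to get firewall policy %s: %s", policy_id, exc)
        return None

print(get_fw_policy(StubClient(), "fw-123"))  # None, with the error logged
```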
noahbenson/neuropythy | neuropythy/util/core.py | library_path | def library_path():
'''
library_path() yields the path of the neuropythy library.
'''
return os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib')) | python | def library_path():
'''
library_path() yields the path of the neuropythy library.
'''
return os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib')) | [
"def",
"library_path",
"(",
")",
":",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
",",
"'lib'",
")",
")"
] | library_path() yields the path of the neuropythy library. | [
"library_path",
"()",
"yields",
"the",
"path",
"of",
"the",
"neuropythy",
"library",
"."
] | train | https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/util/core.py#L1132-L1136 | 0.010582 |
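The path arithmetic climbs one package level up from this module and descends into `lib/`. Spelled out with an example location:

```python
import os

module_file = "/opt/site/neuropythy/util/core.py"        # example, not the real path
pkg_dir = os.path.dirname(os.path.dirname(module_file))  # /opt/site/neuropythy
print(os.path.join(pkg_dir, "lib"))                      # /opt/site/neuropythy/lib
```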
sosy-lab/benchexec | benchexec/containerexecutor.py | ContainerExecutor._setup_container_filesystem | def _setup_container_filesystem(self, temp_dir, output_dir, memlimit, memory_nodes):
"""Setup the filesystem layout in the container.
As first step, we create a copy of all existing mountpoints in mount_base, recursively,
and as "private" mounts (i.e., changes to existing mountpoints afterwards won't propagate
to our copy).
Then we iterate over all mountpoints and change them
according to the mode the user has specified (hidden, read-only, overlay, or full-access).
This has to be done for each mountpoint because overlays are not recursive.
Then we chroot into the new mount hierarchy.
The new filesystem layout still has a view of the host's /proc.
We do not mount a fresh /proc here because the grandchild still needs the old /proc.
We do simply iterate over all existing mount points and set them to read-only/overlay them,
because it is easier to create a new hierarchy and chroot into it.
First, we still have access to the original mountpoints while doing so,
and second, we avoid race conditions if someone else changes the existing mountpoints.
@param temp_dir: The base directory under which all our directories should be created.
"""
# All strings here are bytes to avoid issues if existing mountpoints are invalid UTF-8.
temp_base = self._get_result_files_base(temp_dir).encode() # directory with files created by tool
temp_dir = temp_dir.encode()
tmpfs_opts = ["size=" + str(memlimit or "100%")]
if memory_nodes:
tmpfs_opts.append("mpol=bind:" + ",".join(map(str, memory_nodes)))
tmpfs_opts = (",".join(tmpfs_opts)).encode()
if self._container_tmpfs:
libc.mount(None, temp_dir, b"tmpfs", 0, tmpfs_opts)
mount_base = os.path.join(temp_dir, b"mount") # base dir for container mounts
os.mkdir(mount_base)
os.mkdir(temp_base)
def _is_below(path, target_path):
# compare with trailing slashes for cases like /foo and /foobar
path = os.path.join(path, b"")
target_path = os.path.join(target_path, b"")
return path.startswith(target_path)
def find_mode_for_dir(path, fstype=None):
if (path == b"/proc"):
# /proc is necessary for the grandchild to read PID, will be replaced later.
return DIR_READ_ONLY
if _is_below(path, b"/proc"):
# Irrelevant.
return None
parent_mode = None
result_mode = None
for special_dir, mode in self._dir_modes.items():
if _is_below(path, special_dir):
if path != special_dir:
parent_mode = mode
result_mode = mode
assert result_mode is not None
if result_mode == DIR_OVERLAY and (
_is_below(path, b"/dev") or
_is_below(path, b"/sys") or
fstype == b"cgroup"):
# Overlay does not make sense for /dev, /sys, and all cgroups.
return DIR_READ_ONLY
if result_mode == DIR_OVERLAY and (
fstype == b"autofs" or
fstype == b"vfat" or
fstype == b"ntfs"):
# Overlayfs does not support these as underlying file systems.
logging.debug("Cannot use overlay mode for %s because it has file system %s. "
"Using read-only mode instead.",
path.decode(), fstype.decode())
return DIR_READ_ONLY
if result_mode == DIR_HIDDEN and parent_mode == DIR_HIDDEN:
# No need to recursively recreate mountpoints in hidden dirs.
return None
return result_mode
# Overlayfs needs its own additional temporary directory ("work" directory).
# temp_base will be the "upper" layer, the host FS the "lower" layer,
# and mount_base the mount target.
work_base = os.path.join(temp_dir, b"overlayfs")
os.mkdir(work_base)
# Create a copy of host's mountpoints.
# Setting MS_PRIVATE flag decouples our mount namespace from the host's,
# i.e., mounts we do are not seen by the host, and any (un)mounts the host does afterward
# are not seen by us. The latter is desired such that new mounts (e.g.,
# USB sticks being plugged in) do not appear in the container.
# Blocking host-side unmounts from being propagated has the disadvantage
# that any unmounts done by the sysadmin won't really unmount the device
# because it stays mounted in the container and thus keeps the device busy
# (cf. https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=739593#85).
# We could allow unmounts being propagated with MS_SLAVE instead of MS_PRIVATE,
# but we prefer to have the mount namespace of the container being
# unchanged during run execution.
container.make_bind_mount(b"/", mount_base, recursive=True, private=True)
# Ensure each special dir is a mountpoint such that the next loop covers it.
for special_dir in self._dir_modes.keys():
mount_path = mount_base + special_dir
temp_path = temp_base + special_dir
try:
container.make_bind_mount(mount_path, mount_path)
except OSError as e:
# on btrfs, non-recursive bind mounts fail
if e.errno == errno.EINVAL:
try:
container.make_bind_mount(mount_path, mount_path, recursive=True)
except OSError as e2:
logging.debug("Failed to make %s a (recursive) bind mount: %s", mount_path, e2)
else:
logging.debug("Failed to make %s a bind mount: %s", mount_path, e)
if not os.path.exists(temp_path):
os.makedirs(temp_path)
# Set desired access mode for each mountpoint.
for unused_source, full_mountpoint, fstype, options in list(container.get_mount_points()):
if not _is_below(full_mountpoint, mount_base):
continue
mountpoint = full_mountpoint[len(mount_base):] or b"/"
mode = find_mode_for_dir(mountpoint, fstype)
if not mode:
continue
if not os.access(os.path.dirname(mountpoint), os.X_OK):
# If parent is not accessible we cannot mount something on mountpoint.
# We mark the inaccessible directory as hidden because otherwise the mountpoint
# could become accessible (directly!) if the permissions on the parent
# are relaxed during container execution.
original_mountpoint = mountpoint
parent = os.path.dirname(mountpoint)
while not os.access(parent, os.X_OK):
mountpoint = parent
parent = os.path.dirname(mountpoint)
mode = DIR_HIDDEN
logging.debug(
"Marking inaccessible directory '%s' as hidden "
"because it contains a mountpoint at '%s'",
mountpoint.decode(), original_mountpoint.decode())
else:
logging.debug("Mounting '%s' as %s", mountpoint.decode(), mode)
mount_path = mount_base + mountpoint
temp_path = temp_base + mountpoint
work_path = work_base + mountpoint
if mode == DIR_OVERLAY:
if not os.path.exists(temp_path):
os.makedirs(temp_path)
if not os.path.exists(work_path):
os.makedirs(work_path)
try:
# Previous mount in this place not needed if replaced with overlay dir.
libc.umount(mount_path)
except OSError as e:
logging.debug(e)
try:
container.make_overlay_mount(mount_path, mountpoint, temp_path, work_path)
except OSError as e:
raise OSError(e.errno,
"Creating overlay mount for '{}' failed: {}. "
"Please use other directory modes."
.format(mountpoint.decode(), os.strerror(e.errno)))
elif mode == DIR_HIDDEN:
if not os.path.exists(temp_path):
os.makedirs(temp_path)
try:
# Previous mount in this place not needed if replaced with hidden dir.
libc.umount(mount_path)
except OSError as e:
logging.debug(e)
container.make_bind_mount(temp_path, mount_path)
elif mode == DIR_READ_ONLY:
try:
container.remount_with_additional_flags(mount_path, options, libc.MS_RDONLY)
except OSError as e:
if e.errno == errno.EACCES:
logging.warning(
"Cannot mount '%s', directory may be missing from container.",
mountpoint.decode())
else:
# If this mountpoint is below an overlay/hidden dir re-create mountpoint.
# Linux does not support making read-only bind mounts in one step:
# https://lwn.net/Articles/281157/ http://man7.org/linux/man-pages/man8/mount.8.html
container.make_bind_mount(
mountpoint, mount_path, recursive=True, private=True)
container.remount_with_additional_flags(mount_path, options, libc.MS_RDONLY)
elif mode == DIR_FULL_ACCESS:
try:
# Ensure directory is still a mountpoint by attempting to remount.
container.remount_with_additional_flags(mount_path, options, 0)
except OSError as e:
if e.errno == errno.EACCES:
logging.warning(
"Cannot mount '%s', directory may be missing from container.",
mountpoint.decode())
else:
# If this mountpoint is below an overlay/hidden dir re-create mountpoint.
container.make_bind_mount(
mountpoint, mount_path, recursive=True, private=True)
else:
assert False
# Now configure some special hard-coded cases
def make_tmpfs_dir(path):
"""Ensure that a tmpfs is mounted on path, if the path exists"""
if path in self._dir_modes:
return # explicitly configured by user
mount_tmpfs = mount_base + path
temp_tmpfs = temp_base + path
util.makedirs(temp_tmpfs, exist_ok=True)
if os.path.isdir(mount_tmpfs):
# If we already have a tmpfs, we can just bind mount it, otherwise we need one
if self._container_tmpfs:
container.make_bind_mount(temp_tmpfs, mount_tmpfs)
else:
libc.mount(None, mount_tmpfs, b"tmpfs", 0, tmpfs_opts)
# The following directories should be writable RAM disks for Posix shared memory.
# For example, the Python multiprocessing module explicitly checks for a tmpfs instance.
make_tmpfs_dir(b"/dev/shm")
make_tmpfs_dir(b"/run/shm")
if self._container_system_config:
# If overlayfs is not used for /etc, we need additional bind mounts
# for files in /etc that we want to override, like /etc/passwd
config_mount_base = mount_base if find_mode_for_dir(b"/etc") != DIR_OVERLAY else None
container.setup_container_system_config(temp_base, config_mount_base)
if output_dir:
# We need a way to see temp_base in the container in order to be able to copy result
# files out of it, so we need a directory that is guaranteed to exist in order to use
# it as mountpoint for a bind mount to temp_base.
# Of course, the tool inside the container should not have access to temp_base,
# so we will add another bind mount with an empty directory on top
# (equivalent to --hidden-dir). After the tool terminates we can unmount
# the top-level bind mount and then access temp_base. However, this works only
# if there is no other mount point below that directory, and the user can force us
# to create mount points at an arbitrary directory if a directory mode is specified.
# So we need an existing directory with no mount points below, and luckily temp_dir
# fulfills all requirements (because we have just created it as a fresh directory ourselves).
# So we mount temp_base outside of the container to temp_dir inside.
util.makedirs(mount_base + temp_dir, exist_ok=True)
container.make_bind_mount(temp_base, mount_base + temp_dir, read_only=True)
# And the following if branch will automatically hide the bind
# mount below an empty directory.
# If necessary, (i.e., if /tmp is not already hidden),
# hide the directory where we store our files from processes in the container
# by mounting an empty directory over it.
if os.path.exists(mount_base + temp_dir):
util.makedirs(temp_base + temp_dir, exist_ok=True)
container.make_bind_mount(temp_base + temp_dir, mount_base + temp_dir)
os.chroot(mount_base) | python | def _setup_container_filesystem(self, temp_dir, output_dir, memlimit, memory_nodes):
"""Setup the filesystem layout in the container.
As first step, we create a copy of all existing mountpoints in mount_base, recursively,
and as "private" mounts (i.e., changes to existing mountpoints afterwards won't propagate
to our copy).
Then we iterate over all mountpoints and change them
according to the mode the user has specified (hidden, read-only, overlay, or full-access).
This has to be done for each mountpoint because overlays are not recursive.
Then we chroot into the new mount hierarchy.
The new filesystem layout still has a view of the host's /proc.
We do not mount a fresh /proc here because the grandchild still needs the old /proc.
We do simply iterate over all existing mount points and set them to read-only/overlay them,
because it is easier to create a new hierarchy and chroot into it.
First, we still have access to the original mountpoints while doing so,
and second, we avoid race conditions if someone else changes the existing mountpoints.
@param temp_dir: The base directory under which all our directories should be created.
"""
# All strings here are bytes to avoid issues if existing mountpoints are invalid UTF-8.
temp_base = self._get_result_files_base(temp_dir).encode() # directory with files created by tool
temp_dir = temp_dir.encode()
tmpfs_opts = ["size=" + str(memlimit or "100%")]
if memory_nodes:
tmpfs_opts.append("mpol=bind:" + ",".join(map(str, memory_nodes)))
tmpfs_opts = (",".join(tmpfs_opts)).encode()
if self._container_tmpfs:
libc.mount(None, temp_dir, b"tmpfs", 0, tmpfs_opts)
mount_base = os.path.join(temp_dir, b"mount") # base dir for container mounts
os.mkdir(mount_base)
os.mkdir(temp_base)
def _is_below(path, target_path):
# compare with trailing slashes for cases like /foo and /foobar
path = os.path.join(path, b"")
target_path = os.path.join(target_path, b"")
return path.startswith(target_path)
def find_mode_for_dir(path, fstype=None):
if (path == b"/proc"):
# /proc is necessary for the grandchild to read PID, will be replaced later.
return DIR_READ_ONLY
if _is_below(path, b"/proc"):
# Irrelevant.
return None
parent_mode = None
result_mode = None
for special_dir, mode in self._dir_modes.items():
if _is_below(path, special_dir):
if path != special_dir:
parent_mode = mode
result_mode = mode
assert result_mode is not None
if result_mode == DIR_OVERLAY and (
_is_below(path, b"/dev") or
_is_below(path, b"/sys") or
fstype == b"cgroup"):
# Overlay does not make sense for /dev, /sys, and all cgroups.
return DIR_READ_ONLY
if result_mode == DIR_OVERLAY and (
fstype == b"autofs" or
fstype == b"vfat" or
fstype == b"ntfs"):
# Overlayfs does not support these as underlying file systems.
logging.debug("Cannot use overlay mode for %s because it has file system %s. "
"Using read-only mode instead.",
path.decode(), fstype.decode())
return DIR_READ_ONLY
if result_mode == DIR_HIDDEN and parent_mode == DIR_HIDDEN:
# No need to recursively recreate mountpoints in hidden dirs.
return None
return result_mode
# Overlayfs needs its own additional temporary directory ("work" directory).
# temp_base will be the "upper" layer, the host FS the "lower" layer,
# and mount_base the mount target.
work_base = os.path.join(temp_dir, b"overlayfs")
os.mkdir(work_base)
# Create a copy of host's mountpoints.
# Setting MS_PRIVATE flag decouples our mount namespace from the host's,
# i.e., mounts we do are not seen by the host, and any (un)mounts the host does afterward
# are not seen by us. The latter is desired such that new mounts (e.g.,
# USB sticks being plugged in) do not appear in the container.
# Blocking host-side unmounts from being propagated has the disadvantage
# that any unmounts done by the sysadmin won't really unmount the device
# because it stays mounted in the container and thus keeps the device busy
# (cf. https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=739593#85).
# We could allow unmounts being propagated with MS_SLAVE instead of MS_PRIVATE,
# but we prefer to have the mount namespace of the container being
# unchanged during run execution.
container.make_bind_mount(b"/", mount_base, recursive=True, private=True)
# Ensure each special dir is a mountpoint such that the next loop covers it.
for special_dir in self._dir_modes.keys():
mount_path = mount_base + special_dir
temp_path = temp_base + special_dir
try:
container.make_bind_mount(mount_path, mount_path)
except OSError as e:
# on btrfs, non-recursive bind mounts fail
if e.errno == errno.EINVAL:
try:
container.make_bind_mount(mount_path, mount_path, recursive=True)
except OSError as e2:
logging.debug("Failed to make %s a (recursive) bind mount: %s", mount_path, e2)
else:
logging.debug("Failed to make %s a bind mount: %s", mount_path, e)
if not os.path.exists(temp_path):
os.makedirs(temp_path)
# Set desired access mode for each mountpoint.
for unused_source, full_mountpoint, fstype, options in list(container.get_mount_points()):
if not _is_below(full_mountpoint, mount_base):
continue
mountpoint = full_mountpoint[len(mount_base):] or b"/"
mode = find_mode_for_dir(mountpoint, fstype)
if not mode:
continue
if not os.access(os.path.dirname(mountpoint), os.X_OK):
# If parent is not accessible we cannot mount something on mountpoint.
# We mark the inaccessible directory as hidden because otherwise the mountpoint
# could become accessible (directly!) if the permissions on the parent
# are relaxed during container execution.
original_mountpoint = mountpoint
parent = os.path.dirname(mountpoint)
while not os.access(parent, os.X_OK):
mountpoint = parent
parent = os.path.dirname(mountpoint)
mode = DIR_HIDDEN
logging.debug(
"Marking inaccessible directory '%s' as hidden "
"because it contains a mountpoint at '%s'",
mountpoint.decode(), original_mountpoint.decode())
else:
logging.debug("Mounting '%s' as %s", mountpoint.decode(), mode)
mount_path = mount_base + mountpoint
temp_path = temp_base + mountpoint
work_path = work_base + mountpoint
if mode == DIR_OVERLAY:
if not os.path.exists(temp_path):
os.makedirs(temp_path)
if not os.path.exists(work_path):
os.makedirs(work_path)
try:
# Previous mount in this place not needed if replaced with overlay dir.
libc.umount(mount_path)
except OSError as e:
logging.debug(e)
try:
container.make_overlay_mount(mount_path, mountpoint, temp_path, work_path)
except OSError as e:
raise OSError(e.errno,
"Creating overlay mount for '{}' failed: {}. "
"Please use other directory modes."
.format(mountpoint.decode(), os.strerror(e.errno)))
elif mode == DIR_HIDDEN:
if not os.path.exists(temp_path):
os.makedirs(temp_path)
try:
# Previous mount in this place not needed if replaced with hidden dir.
libc.umount(mount_path)
except OSError as e:
logging.debug(e)
container.make_bind_mount(temp_path, mount_path)
elif mode == DIR_READ_ONLY:
try:
container.remount_with_additional_flags(mount_path, options, libc.MS_RDONLY)
except OSError as e:
if e.errno == errno.EACCES:
logging.warning(
"Cannot mount '%s', directory may be missing from container.",
mountpoint.decode())
else:
# If this mountpoint is below an overlay/hidden dir re-create mountpoint.
# Linux does not support making read-only bind mounts in one step:
# https://lwn.net/Articles/281157/ http://man7.org/linux/man-pages/man8/mount.8.html
container.make_bind_mount(
mountpoint, mount_path, recursive=True, private=True)
container.remount_with_additional_flags(mount_path, options, libc.MS_RDONLY)
elif mode == DIR_FULL_ACCESS:
try:
# Ensure directory is still a mountpoint by attempting to remount.
container.remount_with_additional_flags(mount_path, options, 0)
except OSError as e:
if e.errno == errno.EACCES:
logging.warning(
"Cannot mount '%s', directory may be missing from container.",
mountpoint.decode())
else:
# If this mountpoint is below an overlay/hidden dir re-create mountpoint.
container.make_bind_mount(
mountpoint, mount_path, recursive=True, private=True)
else:
assert False
# Now configure some special hard-coded cases
def make_tmpfs_dir(path):
"""Ensure that a tmpfs is mounted on path, if the path exists"""
if path in self._dir_modes:
return # explicitly configured by user
mount_tmpfs = mount_base + path
temp_tmpfs = temp_base + path
util.makedirs(temp_tmpfs, exist_ok=True)
if os.path.isdir(mount_tmpfs):
# If we already have a tmpfs, we can just bind mount it, otherwise we need one
if self._container_tmpfs:
container.make_bind_mount(temp_tmpfs, mount_tmpfs)
else:
libc.mount(None, mount_tmpfs, b"tmpfs", 0, tmpfs_opts)
# The following directories should be writable RAM disks for Posix shared memory.
# For example, the Python multiprocessing module explicitly checks for a tmpfs instance.
make_tmpfs_dir(b"/dev/shm")
make_tmpfs_dir(b"/run/shm")
if self._container_system_config:
# If overlayfs is not used for /etc, we need additional bind mounts
# for files in /etc that we want to override, like /etc/passwd
config_mount_base = mount_base if find_mode_for_dir(b"/etc") != DIR_OVERLAY else None
container.setup_container_system_config(temp_base, config_mount_base)
if output_dir:
# We need a way to see temp_base in the container in order to be able to copy result
# files out of it, so we need a directory that is guaranteed to exist in order to use
# it as mountpoint for a bind mount to temp_base.
# Of course, the tool inside the container should not have access to temp_base,
# so we will add another bind mount with an empty directory on top
# (equivalent to --hidden-dir). After the tool terminates we can unmount
# the top-level bind mount and then access temp_base. However, this works only
# if there is no other mount point below that directory, and the user can force us
# to create mount points at arbitrary directories if a directory mode is specified.
# So we need an existing directory with no mount points below, and luckily temp_dir
# fulfills all requirements (because we have just created it as a fresh directory ourselves).
# So we mount temp_base outside of the container to temp_dir inside.
util.makedirs(mount_base + temp_dir, exist_ok=True)
container.make_bind_mount(temp_base, mount_base + temp_dir, read_only=True)
# And the following if branch will automatically hide the bind
# mount below an empty directory.
# If necessary, (i.e., if /tmp is not already hidden),
# hide the directory where we store our files from processes in the container
# by mounting an empty directory over it.
if os.path.exists(mount_base + temp_dir):
util.makedirs(temp_base + temp_dir, exist_ok=True)
container.make_bind_mount(temp_base + temp_dir, mount_base + temp_dir)
os.chroot(mount_base) | [
"def",
"_setup_container_filesystem",
"(",
"self",
",",
"temp_dir",
",",
"output_dir",
",",
"memlimit",
",",
"memory_nodes",
")",
":",
"# All strings here are bytes to avoid issues if existing mountpoints are invalid UTF-8.",
"temp_base",
"=",
"self",
".",
"_get_result_files_base",
"(",
"temp_dir",
")",
".",
"encode",
"(",
")",
"# directory with files created by tool",
"temp_dir",
"=",
"temp_dir",
".",
"encode",
"(",
")",
"tmpfs_opts",
"=",
"[",
"\"size=\"",
"+",
"str",
"(",
"memlimit",
"or",
"\"100%\"",
")",
"]",
"if",
"memory_nodes",
":",
"tmpfs_opts",
".",
"append",
"(",
"\"mpol=bind:\"",
"+",
"\",\"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"memory_nodes",
")",
")",
")",
"tmpfs_opts",
"=",
"(",
"\",\"",
".",
"join",
"(",
"tmpfs_opts",
")",
")",
".",
"encode",
"(",
")",
"if",
"self",
".",
"_container_tmpfs",
":",
"libc",
".",
"mount",
"(",
"None",
",",
"temp_dir",
",",
"b\"tmpfs\"",
",",
"0",
",",
"tmpfs_opts",
")",
"mount_base",
"=",
"os",
".",
"path",
".",
"join",
"(",
"temp_dir",
",",
"b\"mount\"",
")",
"# base dir for container mounts",
"os",
".",
"mkdir",
"(",
"mount_base",
")",
"os",
".",
"mkdir",
"(",
"temp_base",
")",
"def",
"_is_below",
"(",
"path",
",",
"target_path",
")",
":",
"# compare with trailing slashes for cases like /foo and /foobar",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"b\"\"",
")",
"target_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"target_path",
",",
"b\"\"",
")",
"return",
"path",
".",
"startswith",
"(",
"target_path",
")",
"def",
"find_mode_for_dir",
"(",
"path",
",",
"fstype",
"=",
"None",
")",
":",
"if",
"(",
"path",
"==",
"b\"/proc\"",
")",
":",
"# /proc is necessary for the grandchild to read PID, will be replaced later.",
"return",
"DIR_READ_ONLY",
"if",
"_is_below",
"(",
"path",
",",
"b\"/proc\"",
")",
":",
"# Irrelevant.",
"return",
"None",
"parent_mode",
"=",
"None",
"result_mode",
"=",
"None",
"for",
"special_dir",
",",
"mode",
"in",
"self",
".",
"_dir_modes",
".",
"items",
"(",
")",
":",
"if",
"_is_below",
"(",
"path",
",",
"special_dir",
")",
":",
"if",
"path",
"!=",
"special_dir",
":",
"parent_mode",
"=",
"mode",
"result_mode",
"=",
"mode",
"assert",
"result_mode",
"is",
"not",
"None",
"if",
"result_mode",
"==",
"DIR_OVERLAY",
"and",
"(",
"_is_below",
"(",
"path",
",",
"b\"/dev\"",
")",
"or",
"_is_below",
"(",
"path",
",",
"b\"/sys\"",
")",
"or",
"fstype",
"==",
"b\"cgroup\"",
")",
":",
"# Overlay does not make sense for /dev, /sys, and all cgroups.",
"return",
"DIR_READ_ONLY",
"if",
"result_mode",
"==",
"DIR_OVERLAY",
"and",
"(",
"fstype",
"==",
"b\"autofs\"",
"or",
"fstype",
"==",
"b\"vfat\"",
"or",
"fstype",
"==",
"b\"ntfs\"",
")",
":",
"# Overlayfs does not support these as underlying file systems.",
"logging",
".",
"debug",
"(",
"\"Cannot use overlay mode for %s because it has file system %s. \"",
"\"Using read-only mode instead.\"",
",",
"path",
".",
"decode",
"(",
")",
",",
"fstype",
".",
"decode",
"(",
")",
")",
"return",
"DIR_READ_ONLY",
"if",
"result_mode",
"==",
"DIR_HIDDEN",
"and",
"parent_mode",
"==",
"DIR_HIDDEN",
":",
"# No need to recursively recreate mountpoints in hidden dirs.",
"return",
"None",
"return",
"result_mode",
"# Overlayfs needs its own additional temporary directory (\"work\" directory).",
"# temp_base will be the \"upper\" layer, the host FS the \"lower\" layer,",
"# and mount_base the mount target.",
"work_base",
"=",
"os",
".",
"path",
".",
"join",
"(",
"temp_dir",
",",
"b\"overlayfs\"",
")",
"os",
".",
"mkdir",
"(",
"work_base",
")",
"# Create a copy of host's mountpoints.",
"# Setting MS_PRIVATE flag discouples our mount namespace from the hosts's,",
"# i.e., mounts we do are not seen by the host, and any (un)mounts the host does afterward",
"# are not seen by us. The latter is desired such that new mounts (e.g.,",
"# USB sticks being plugged in) do not appear in the container.",
"# Blocking host-side unmounts from being propagated has the disadvantage",
"# that any unmounts done by the sysadmin won't really unmount the device",
"# because it stays mounted in the container and thus keep the device busy",
"# (cf. https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=739593#85).",
"# We could allow unmounts being propated with MS_SLAVE instead of MS_PRIVATE,",
"# but we prefer to have the mount namespace of the container being",
"# unchanged during run execution.",
"container",
".",
"make_bind_mount",
"(",
"b\"/\"",
",",
"mount_base",
",",
"recursive",
"=",
"True",
",",
"private",
"=",
"True",
")",
"# Ensure each special dir is a mountpoint such that the next loop covers it.",
"for",
"special_dir",
"in",
"self",
".",
"_dir_modes",
".",
"keys",
"(",
")",
":",
"mount_path",
"=",
"mount_base",
"+",
"special_dir",
"temp_path",
"=",
"temp_base",
"+",
"special_dir",
"try",
":",
"container",
".",
"make_bind_mount",
"(",
"mount_path",
",",
"mount_path",
")",
"except",
"OSError",
"as",
"e",
":",
"# on btrfs, non-recursive bind mounts faitl",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"EINVAL",
":",
"try",
":",
"container",
".",
"make_bind_mount",
"(",
"mount_path",
",",
"mount_path",
",",
"recursive",
"=",
"True",
")",
"except",
"OSError",
"as",
"e2",
":",
"logging",
".",
"debug",
"(",
"\"Failed to make %s a (recursive) bind mount: %s\"",
",",
"mount_path",
",",
"e2",
")",
"else",
":",
"logging",
".",
"debug",
"(",
"\"Failed to make %s a bind mount: %s\"",
",",
"mount_path",
",",
"e",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"temp_path",
")",
":",
"os",
".",
"makedirs",
"(",
"temp_path",
")",
"# Set desired access mode for each mountpoint.",
"for",
"unused_source",
",",
"full_mountpoint",
",",
"fstype",
",",
"options",
"in",
"list",
"(",
"container",
".",
"get_mount_points",
"(",
")",
")",
":",
"if",
"not",
"_is_below",
"(",
"full_mountpoint",
",",
"mount_base",
")",
":",
"continue",
"mountpoint",
"=",
"full_mountpoint",
"[",
"len",
"(",
"mount_base",
")",
":",
"]",
"or",
"b\"/\"",
"mode",
"=",
"find_mode_for_dir",
"(",
"mountpoint",
",",
"fstype",
")",
"if",
"not",
"mode",
":",
"continue",
"if",
"not",
"os",
".",
"access",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"mountpoint",
")",
",",
"os",
".",
"X_OK",
")",
":",
"# If parent is not accessible we cannot mount something on mountpoint.",
"# We mark the inaccessible directory as hidden because otherwise the mountpoint",
"# could become accessible (directly!) if the permissions on the parent",
"# are relaxed during container execution.",
"original_mountpoint",
"=",
"mountpoint",
"parent",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"mountpoint",
")",
"while",
"not",
"os",
".",
"access",
"(",
"parent",
",",
"os",
".",
"X_OK",
")",
":",
"mountpoint",
"=",
"parent",
"parent",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"mountpoint",
")",
"mode",
"=",
"DIR_HIDDEN",
"logging",
".",
"debug",
"(",
"\"Marking inaccessible directory '%s' as hidden \"",
"\"because it contains a mountpoint at '%s'\"",
",",
"mountpoint",
".",
"decode",
"(",
")",
",",
"original_mountpoint",
".",
"decode",
"(",
")",
")",
"else",
":",
"logging",
".",
"debug",
"(",
"\"Mounting '%s' as %s\"",
",",
"mountpoint",
".",
"decode",
"(",
")",
",",
"mode",
")",
"mount_path",
"=",
"mount_base",
"+",
"mountpoint",
"temp_path",
"=",
"temp_base",
"+",
"mountpoint",
"work_path",
"=",
"work_base",
"+",
"mountpoint",
"if",
"mode",
"==",
"DIR_OVERLAY",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"temp_path",
")",
":",
"os",
".",
"makedirs",
"(",
"temp_path",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"work_path",
")",
":",
"os",
".",
"makedirs",
"(",
"work_path",
")",
"try",
":",
"# Previous mount in this place not needed if replaced with overlay dir.",
"libc",
".",
"umount",
"(",
"mount_path",
")",
"except",
"OSError",
"as",
"e",
":",
"logging",
".",
"debug",
"(",
"e",
")",
"try",
":",
"container",
".",
"make_overlay_mount",
"(",
"mount_path",
",",
"mountpoint",
",",
"temp_path",
",",
"work_path",
")",
"except",
"OSError",
"as",
"e",
":",
"raise",
"OSError",
"(",
"e",
".",
"errno",
",",
"\"Creating overlay mount for '{}' failed: {}. \"",
"\"Please use other directory modes.\"",
".",
"format",
"(",
"mountpoint",
".",
"decode",
"(",
")",
",",
"os",
".",
"strerror",
"(",
"e",
".",
"errno",
")",
")",
")",
"elif",
"mode",
"==",
"DIR_HIDDEN",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"temp_path",
")",
":",
"os",
".",
"makedirs",
"(",
"temp_path",
")",
"try",
":",
"# Previous mount in this place not needed if replaced with hidden dir.",
"libc",
".",
"umount",
"(",
"mount_path",
")",
"except",
"OSError",
"as",
"e",
":",
"logging",
".",
"debug",
"(",
"e",
")",
"container",
".",
"make_bind_mount",
"(",
"temp_path",
",",
"mount_path",
")",
"elif",
"mode",
"==",
"DIR_READ_ONLY",
":",
"try",
":",
"container",
".",
"remount_with_additional_flags",
"(",
"mount_path",
",",
"options",
",",
"libc",
".",
"MS_RDONLY",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"EACCES",
":",
"logging",
".",
"warning",
"(",
"\"Cannot mount '%s', directory may be missing from container.\"",
",",
"mountpoint",
".",
"decode",
"(",
")",
")",
"else",
":",
"# If this mountpoint is below an overlay/hidden dir re-create mountpoint.",
"# Linux does not support making read-only bind mounts in one step:",
"# https://lwn.net/Articles/281157/ http://man7.org/linux/man-pages/man8/mount.8.html",
"container",
".",
"make_bind_mount",
"(",
"mountpoint",
",",
"mount_path",
",",
"recursive",
"=",
"True",
",",
"private",
"=",
"True",
")",
"container",
".",
"remount_with_additional_flags",
"(",
"mount_path",
",",
"options",
",",
"libc",
".",
"MS_RDONLY",
")",
"elif",
"mode",
"==",
"DIR_FULL_ACCESS",
":",
"try",
":",
"# Ensure directory is still a mountpoint by attempting to remount.",
"container",
".",
"remount_with_additional_flags",
"(",
"mount_path",
",",
"options",
",",
"0",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"EACCES",
":",
"logging",
".",
"warning",
"(",
"\"Cannot mount '%s', directory may be missing from container.\"",
",",
"mountpoint",
".",
"decode",
"(",
")",
")",
"else",
":",
"# If this mountpoint is below an overlay/hidden dir re-create mountpoint.",
"container",
".",
"make_bind_mount",
"(",
"mountpoint",
",",
"mount_path",
",",
"recursive",
"=",
"True",
",",
"private",
"=",
"True",
")",
"else",
":",
"assert",
"False",
"# Now configure some special hard-coded cases",
"def",
"make_tmpfs_dir",
"(",
"path",
")",
":",
"\"\"\"Ensure that a tmpfs is mounted on path, if the path exists\"\"\"",
"if",
"path",
"in",
"self",
".",
"_dir_modes",
":",
"return",
"# explicitly configured by user",
"mount_tmpfs",
"=",
"mount_base",
"+",
"path",
"temp_tmpfs",
"=",
"temp_base",
"+",
"path",
"util",
".",
"makedirs",
"(",
"temp_tmpfs",
",",
"exist_ok",
"=",
"True",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"mount_tmpfs",
")",
":",
"# If we already have a tmpfs, we can just bind mount it, otherwise we need one",
"if",
"self",
".",
"_container_tmpfs",
":",
"container",
".",
"make_bind_mount",
"(",
"temp_tmpfs",
",",
"mount_tmpfs",
")",
"else",
":",
"libc",
".",
"mount",
"(",
"None",
",",
"mount_tmpfs",
",",
"b\"tmpfs\"",
",",
"0",
",",
"tmpfs_opts",
")",
"# The following directories should be writable RAM disks for Posix shared memory.",
"# For example, the Python multiprocessing module explicitly checks for a tmpfs instance.",
"make_tmpfs_dir",
"(",
"b\"/dev/shm\"",
")",
"make_tmpfs_dir",
"(",
"b\"/run/shm\"",
")",
"if",
"self",
".",
"_container_system_config",
":",
"# If overlayfs is not used for /etc, we need additional bind mounts",
"# for files in /etc that we want to override, like /etc/passwd",
"config_mount_base",
"=",
"mount_base",
"if",
"find_mode_for_dir",
"(",
"b\"/etc\"",
")",
"!=",
"DIR_OVERLAY",
"else",
"None",
"container",
".",
"setup_container_system_config",
"(",
"temp_base",
",",
"config_mount_base",
")",
"if",
"output_dir",
":",
"# We need a way to see temp_base in the container in order to be able to copy result",
"# files out of it, so we need a directory that is guaranteed to exist in order to use",
"# it as mountpoint for a bind mount to temp_base.",
"# Of course, the tool inside the container should not have access to temp_base,",
"# so we will add another bind mount with an empty directory on top",
"# (equivalent to --hidden-dir). After the tool terminates we can unmount",
"# the top-level bind mount and then access temp_base. However, this works only",
"# if there is no other mount point below that directory, and the user can force us",
"# to create mount points at arbitrary directory if a directory mode is specified.",
"# So we need an existing directory with no mount points below, and luckily temp_dir",
"# fulfills all requirements (because we have just created it as fresh drectory ourselves).",
"# So we mount temp_base outside of the container to temp_dir inside.",
"util",
".",
"makedirs",
"(",
"mount_base",
"+",
"temp_dir",
",",
"exist_ok",
"=",
"True",
")",
"container",
".",
"make_bind_mount",
"(",
"temp_base",
",",
"mount_base",
"+",
"temp_dir",
",",
"read_only",
"=",
"True",
")",
"# And the following if branch will automatically hide the bind",
"# mount below an empty directory.",
"# If necessary, (i.e., if /tmp is not already hidden),",
"# hide the directory where we store our files from processes in the container",
"# by mounting an empty directory over it.",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"mount_base",
"+",
"temp_dir",
")",
":",
"util",
".",
"makedirs",
"(",
"temp_base",
"+",
"temp_dir",
",",
"exist_ok",
"=",
"True",
")",
"container",
".",
"make_bind_mount",
"(",
"temp_base",
"+",
"temp_dir",
",",
"mount_base",
"+",
"temp_dir",
")",
"os",
".",
"chroot",
"(",
"mount_base",
")"
] | Setup the filesystem layout in the container.
As first step, we create a copy of all existing mountpoints in mount_base, recursively,
and as "private" mounts (i.e., changes to existing mountpoints afterwards won't propagate
to our copy).
Then we iterate over all mountpoints and change them
according to the mode the user has specified (hidden, read-only, overlay, or full-access).
This has to be done for each mountpoint because overlays are not recursive.
Then we chroot into the new mount hierarchy.
The new filesystem layout still has a view of the host's /proc.
We do not mount a fresh /proc here because the grandchild still needs the old /proc.
We do not simply iterate over all existing mount points and set them to read-only/overlay them in place,
because it is easier to create a new hierarchy and chroot into it.
First, we still have access to the original mountpoints while doing so,
and second, we avoid race conditions if someone else changes the existing mountpoints.
@param temp_dir: The base directory under which all our directories should be created. | [
"Setup",
"the",
"filesystem",
"layout",
"in",
"the",
"container",
".",
"As",
"first",
"step",
"we",
"create",
"a",
"copy",
"of",
"all",
"existing",
"mountpoints",
"in",
"mount_base",
"recursively",
"and",
"as",
"private",
"mounts",
"(",
"i",
".",
"e",
".",
"changes",
"to",
"existing",
"mountpoints",
"afterwards",
"won",
"t",
"propagate",
"to",
"our",
"copy",
")",
".",
"Then",
"we",
"iterate",
"over",
"all",
"mountpoints",
"and",
"change",
"them",
"according",
"to",
"the",
"mode",
"the",
"user",
"has",
"specified",
"(",
"hidden",
"read",
"-",
"only",
"overlay",
"or",
"full",
"-",
"access",
")",
".",
"This",
"has",
"do",
"be",
"done",
"for",
"each",
"mountpoint",
"because",
"overlays",
"are",
"not",
"recursive",
".",
"Then",
"we",
"chroot",
"into",
"the",
"new",
"mount",
"hierarchy",
"."
] | train | https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/containerexecutor.py#L656-L918 | 0.005162 |
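The heart of the mode-resolution logic above is a trailing-separator prefix test plus a last-match-wins scan over the configured directory modes. A minimal self-contained sketch of just that part (POSIX byte paths; the mode constants and example paths are illustrative stand-ins, not the BenchExec API):

```python
import os

# Illustrative stand-ins for BenchExec's directory-mode constants.
DIR_HIDDEN, DIR_READ_ONLY, DIR_OVERLAY, DIR_FULL_ACCESS = range(4)

def is_below(path, target_path):
    # Compare with trailing separators so that b"/foo" does not match b"/foobar".
    path = os.path.join(path, b"")
    target_path = os.path.join(target_path, b"")
    return path.startswith(target_path)

def find_mode_for_dir(path, dir_modes):
    # The last matching prefix wins; this assumes dir_modes is ordered
    # from shortest to longest path, as in the function above.
    result = None
    for special_dir, mode in dir_modes.items():
        if is_below(path, special_dir):
            result = mode
    return result

dir_modes = {b"/": DIR_READ_ONLY, b"/home": DIR_HIDDEN, b"/tmp": DIR_FULL_ACCESS}
assert is_below(b"/foo/bar", b"/foo") and not is_below(b"/foobar", b"/foo")
assert find_mode_for_dir(b"/home/user", dir_modes) == DIR_HIDDEN
```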
arthurk/django-disqus | disqus/api.py | DisqusClient._get_request | def _get_request(self, request_url, request_method, **params):
"""
Return a Request object that has the GET parameters
attached to the url or the POST data attached to the object.
"""
if request_method == 'GET':
if params:
request_url += '&%s' % urlencode(params)
request = Request(request_url)
elif request_method == 'POST':
request = Request(request_url, urlencode(params, doseq=1))
return request | python | def _get_request(self, request_url, request_method, **params):
"""
Return a Request object that has the GET parameters
attached to the url or the POST data attached to the object.
"""
if request_method == 'GET':
if params:
request_url += '&%s' % urlencode(params)
request = Request(request_url)
elif request_method == 'POST':
request = Request(request_url, urlencode(params, doseq=1))
return request | [
"def",
"_get_request",
"(",
"self",
",",
"request_url",
",",
"request_method",
",",
"*",
"*",
"params",
")",
":",
"if",
"request_method",
"==",
"'GET'",
":",
"if",
"params",
":",
"request_url",
"+=",
"'&%s'",
"%",
"urlencode",
"(",
"params",
")",
"request",
"=",
"Request",
"(",
"request_url",
")",
"elif",
"request_method",
"==",
"'POST'",
":",
"request",
"=",
"Request",
"(",
"request_url",
",",
"urlencode",
"(",
"params",
",",
"doseq",
"=",
"1",
")",
")",
"return",
"request"
] | Return a Request object that has the GET parameters
attached to the url or the POST data attached to the object. | [
"Return",
"a",
"Request",
"object",
"that",
"has",
"the",
"GET",
"parameters",
"attached",
"to",
"the",
"url",
"or",
"the",
"POST",
"data",
"attached",
"to",
"the",
"object",
"."
] | train | https://github.com/arthurk/django-disqus/blob/0db52c240906c6663189c0a7aca9979a0db004d1/disqus/api.py#L65-L76 | 0.003945 |
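A short usage sketch of the GET/POST split described above, rewritten against Python 3's stdlib (the original targets Python 2's `urllib2`; on Python 3 a POST body must be bytes, hence the `.encode()` — URL and parameters are made up):

```python
from urllib.request import Request
from urllib.parse import urlencode

def get_request(request_url, request_method, **params):
    if request_method == 'GET':
        if params:
            request_url += '&%s' % urlencode(params)
        return Request(request_url)
    elif request_method == 'POST':
        return Request(request_url, urlencode(params, doseq=1).encode())

get = get_request('http://disqus.com/api/get_forum_list/?api_version=1.1',
                  'GET', user_api_key='KEY')
post = get_request('http://disqus.com/api/create_post/', 'POST', message='hi')
print(get.full_url)   # GET parameters end up appended to the URL
print(post.data)      # POST parameters end up in the request body
```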
pwwang/liquidpy | liquid/__init__.py | Liquid.split | def split (s, delimter, trim = True, limit = 0): # pragma: no cover
"""
Split a string using a single-character delimiter
@params:
`s`: the string
`delimter`: the single-character delimiter
`trim`: whether to trim each part. Default: True
@examples:
```python
ret = split("'a,b',c", ",")
# ret == ["'a,b'", "c"]
# ',' inside quotes will be recognized.
```
@returns:
The list of substrings
"""
ret = []
special1 = ['(', ')', '[', ']', '{', '}']
special2 = ['\'', '"']
special3 = '\\'
flags1 = [0, 0, 0]
flags2 = [False, False]
flags3 = False
start = 0
nlim = 0
for i, c in enumerate(s):
if c == special3:
# next char is escaped
flags3 = not flags3
elif not flags3:
# no escape
if c in special1:
index = special1.index(c)
if index % 2 == 0:
flags1[int(index/2)] += 1
else:
flags1[int(index/2)] -= 1
elif c in special2:
index = special2.index(c)
flags2[index] = not flags2[index]
elif c == delimter and not any(flags1) and not any(flags2):
r = s[start:i]
if trim: r = r.strip()
ret.append(r)
start = i + 1
nlim = nlim + 1
if limit and nlim >= limit:
break
else:
# escaping closed
flags3 = False
r = s[start:]
if trim: r = r.strip()
ret.append(r)
return ret | python | def split (s, delimter, trim = True, limit = 0): # pragma: no cover
"""
Split a string using a single-character delimiter
@params:
`s`: the string
`delimter`: the single-character delimiter
`trim`: whether to trim each part. Default: True
@examples:
```python
ret = split("'a,b',c", ",")
# ret == ["'a,b'", "c"]
# ',' inside quotes will be recognized.
```
@returns:
The list of substrings
"""
ret = []
special1 = ['(', ')', '[', ']', '{', '}']
special2 = ['\'', '"']
special3 = '\\'
flags1 = [0, 0, 0]
flags2 = [False, False]
flags3 = False
start = 0
nlim = 0
for i, c in enumerate(s):
if c == special3:
# next char is escaped
flags3 = not flags3
elif not flags3:
# no escape
if c in special1:
index = special1.index(c)
if index % 2 == 0:
flags1[int(index/2)] += 1
else:
flags1[int(index/2)] -= 1
elif c in special2:
index = special2.index(c)
flags2[index] = not flags2[index]
elif c == delimter and not any(flags1) and not any(flags2):
r = s[start:i]
if trim: r = r.strip()
ret.append(r)
start = i + 1
nlim = nlim + 1
if limit and nlim >= limit:
break
else:
# escaping closed
flags3 = False
r = s[start:]
if trim: r = r.strip()
ret.append(r)
return ret | [
"def",
"split",
"(",
"s",
",",
"delimter",
",",
"trim",
"=",
"True",
",",
"limit",
"=",
"0",
")",
":",
"# pragma: no cover",
"ret",
"=",
"[",
"]",
"special1",
"=",
"[",
"'('",
",",
"')'",
",",
"'['",
",",
"']'",
",",
"'{'",
",",
"'}'",
"]",
"special2",
"=",
"[",
"'\\''",
",",
"'\"'",
"]",
"special3",
"=",
"'\\\\'",
"flags1",
"=",
"[",
"0",
",",
"0",
",",
"0",
"]",
"flags2",
"=",
"[",
"False",
",",
"False",
"]",
"flags3",
"=",
"False",
"start",
"=",
"0",
"nlim",
"=",
"0",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"s",
")",
":",
"if",
"c",
"==",
"special3",
":",
"# next char is escaped",
"flags3",
"=",
"not",
"flags3",
"elif",
"not",
"flags3",
":",
"# no escape",
"if",
"c",
"in",
"special1",
":",
"index",
"=",
"special1",
".",
"index",
"(",
"c",
")",
"if",
"index",
"%",
"2",
"==",
"0",
":",
"flags1",
"[",
"int",
"(",
"index",
"/",
"2",
")",
"]",
"+=",
"1",
"else",
":",
"flags1",
"[",
"int",
"(",
"index",
"/",
"2",
")",
"]",
"-=",
"1",
"elif",
"c",
"in",
"special2",
":",
"index",
"=",
"special2",
".",
"index",
"(",
"c",
")",
"flags2",
"[",
"index",
"]",
"=",
"not",
"flags2",
"[",
"index",
"]",
"elif",
"c",
"==",
"delimter",
"and",
"not",
"any",
"(",
"flags1",
")",
"and",
"not",
"any",
"(",
"flags2",
")",
":",
"r",
"=",
"s",
"[",
"start",
":",
"i",
"]",
"if",
"trim",
":",
"r",
"=",
"r",
".",
"strip",
"(",
")",
"ret",
".",
"append",
"(",
"r",
")",
"start",
"=",
"i",
"+",
"1",
"nlim",
"=",
"nlim",
"+",
"1",
"if",
"limit",
"and",
"nlim",
">=",
"limit",
":",
"break",
"else",
":",
"# escaping closed",
"flags3",
"=",
"False",
"r",
"=",
"s",
"[",
"start",
":",
"]",
"if",
"trim",
":",
"r",
"=",
"r",
".",
"strip",
"(",
")",
"ret",
".",
"append",
"(",
"r",
")",
"return",
"ret"
] | Split a string using a single-character delimiter
@params:
`s`: the string
`delimter`: the single-character delimiter
`trim`: whether to trim each part. Default: True
@examples:
```python
ret = split("'a,b',c", ",")
# ret == ["'a,b'", "c"]
# ',' inside quotes will be recognized.
```
@returns:
The list of substrings | [
"Split",
"a",
"string",
"using",
"a",
"single",
"-",
"character",
"delimter"
] | train | https://github.com/pwwang/liquidpy/blob/f422af836740b7facfbc6b89e5162a17d619dd07/liquid/__init__.py#L367-L421 | 0.050112 |
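A few concrete cases the parser above is designed to handle — delimiters inside quotes, brackets, and escapes are not split on (assuming `split` is reachable as in the record, e.g. `split = Liquid.split`):

```python
# split = Liquid.split  # hypothetical import, per the record above
assert split("'a,b',c", ",") == ["'a,b'", "c"]          # ',' inside quotes kept
assert split("f(a, b), c", ",") == ["f(a, b)", "c"]     # ',' inside parens kept
assert split("a\\,b, c", ",") == ["a\\,b", "c"]         # escaped ',' kept
assert split("a, b, c", ",", limit=1) == ["a", "b, c"]  # stop after first split
```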
googledatalab/pydatalab | datalab/data/commands/_sql.py | _split_cell | def _split_cell(cell, module):
""" Split a hybrid %%sql cell into the Python code and the queries.
Populates a module with the queries.
Args:
cell: the contents of the %%sql cell.
module: the module that the contents will populate.
Returns:
The default (last) query for the module.
"""
lines = cell.split('\n')
code = None
last_def = -1
name = None
define_wild_re = re.compile('^DEFINE\s+.*$', re.IGNORECASE)
define_re = re.compile('^DEFINE\s+QUERY\s+([A-Z]\w*)\s*?(.*)$', re.IGNORECASE)
select_re = re.compile('^SELECT\s*.*$', re.IGNORECASE)
standard_sql_re = re.compile('^(CREATE|WITH|INSERT|DELETE|UPDATE)\s*.*$', re.IGNORECASE)
# TODO(gram): a potential issue with this code is if we have leading Python code followed
# by a SQL-style comment before we see SELECT/DEFINE. When switching to the tokenizer see
# if we can address this.
for i, line in enumerate(lines):
define_match = define_re.match(line)
select_match = select_re.match(line)
standard_sql_match = standard_sql_re.match(line)
if i:
prior_content = ''.join(lines[:i]).strip()
if select_match:
# Avoid matching if previous token was '(' or if Standard SQL is found
# TODO: handle the possibility of comments immediately preceding SELECT
select_match = len(prior_content) == 0 or \
(prior_content[-1] != '(' and not standard_sql_re.match(prior_content))
if standard_sql_match:
standard_sql_match = len(prior_content) == 0 or not standard_sql_re.match(prior_content)
if define_match or select_match or standard_sql_match:
# If this is the first query, get the preceding Python code.
if code is None:
code = ('\n'.join(lines[:i])).strip()
if len(code):
code += '\n'
elif last_def >= 0:
# This is not the first query, so gather the previous query text.
query = '\n'.join([line for line in lines[last_def:i] if len(line)]).strip()
if select_match and name != datalab.data._utils._SQL_MODULE_MAIN and len(query) == 0:
# Avoid DEFINE query name\nSELECT ... being seen as an empty DEFINE followed by SELECT
continue
# Save the query
statement = datalab.data.SqlStatement(query, module)
module.__dict__[name] = statement
# And set the 'last' query to be this too
module.__dict__[datalab.data._utils._SQL_MODULE_LAST] = statement
# Get the query name and strip off our syntactic sugar if appropriate.
if define_match:
name = define_match.group(1)
lines[i] = define_match.group(2)
else:
name = datalab.data._utils._SQL_MODULE_MAIN
# Save the starting line index of the new query
last_def = i
else:
define_wild_match = define_wild_re.match(line)
if define_wild_match:
raise Exception('Expected "DEFINE QUERY <name>"')
if last_def >= 0:
# We were in a query so save this tail query.
query = '\n'.join([line for line in lines[last_def:] if len(line)]).strip()
statement = datalab.data.SqlStatement(query, module)
module.__dict__[name] = statement
module.__dict__[datalab.data._utils._SQL_MODULE_LAST] = statement
if code is None:
code = ''
module.__dict__[datalab.data._utils._SQL_MODULE_ARGPARSE] = _arguments(code, module)
return module.__dict__.get(datalab.data._utils._SQL_MODULE_LAST, None) | python | def _split_cell(cell, module):
""" Split a hybrid %%sql cell into the Python code and the queries.
Populates a module with the queries.
Args:
cell: the contents of the %%sql cell.
module: the module that the contents will populate.
Returns:
The default (last) query for the module.
"""
lines = cell.split('\n')
code = None
last_def = -1
name = None
define_wild_re = re.compile('^DEFINE\s+.*$', re.IGNORECASE)
define_re = re.compile('^DEFINE\s+QUERY\s+([A-Z]\w*)\s*?(.*)$', re.IGNORECASE)
select_re = re.compile('^SELECT\s*.*$', re.IGNORECASE)
standard_sql_re = re.compile('^(CREATE|WITH|INSERT|DELETE|UPDATE)\s*.*$', re.IGNORECASE)
# TODO(gram): a potential issue with this code is if we have leading Python code followed
# by a SQL-style comment before we see SELECT/DEFINE. When switching to the tokenizer see
# if we can address this.
for i, line in enumerate(lines):
define_match = define_re.match(line)
select_match = select_re.match(line)
standard_sql_match = standard_sql_re.match(line)
if i:
prior_content = ''.join(lines[:i]).strip()
if select_match:
# Avoid matching if previous token was '(' or if Standard SQL is found
# TODO: handle the possibility of comments immediately preceding SELECT
select_match = len(prior_content) == 0 or \
(prior_content[-1] != '(' and not standard_sql_re.match(prior_content))
if standard_sql_match:
standard_sql_match = len(prior_content) == 0 or not standard_sql_re.match(prior_content)
if define_match or select_match or standard_sql_match:
# If this is the first query, get the preceding Python code.
if code is None:
code = ('\n'.join(lines[:i])).strip()
if len(code):
code += '\n'
elif last_def >= 0:
# This is not the first query, so gather the previous query text.
query = '\n'.join([line for line in lines[last_def:i] if len(line)]).strip()
if select_match and name != datalab.data._utils._SQL_MODULE_MAIN and len(query) == 0:
# Avoid DEFINE query name\nSELECT ... being seen as an empty DEFINE followed by SELECT
continue
# Save the query
statement = datalab.data.SqlStatement(query, module)
module.__dict__[name] = statement
# And set the 'last' query to be this too
module.__dict__[datalab.data._utils._SQL_MODULE_LAST] = statement
# Get the query name and strip off our syntactic sugar if appropriate.
if define_match:
name = define_match.group(1)
lines[i] = define_match.group(2)
else:
name = datalab.data._utils._SQL_MODULE_MAIN
# Save the starting line index of the new query
last_def = i
else:
define_wild_match = define_wild_re.match(line)
if define_wild_match:
raise Exception('Expected "DEFINE QUERY <name>"')
if last_def >= 0:
# We were in a query so save this tail query.
query = '\n'.join([line for line in lines[last_def:] if len(line)]).strip()
statement = datalab.data.SqlStatement(query, module)
module.__dict__[name] = statement
module.__dict__[datalab.data._utils._SQL_MODULE_LAST] = statement
if code is None:
code = ''
module.__dict__[datalab.data._utils._SQL_MODULE_ARGPARSE] = _arguments(code, module)
return module.__dict__.get(datalab.data._utils._SQL_MODULE_LAST, None) | [
"def",
"_split_cell",
"(",
"cell",
",",
"module",
")",
":",
"lines",
"=",
"cell",
".",
"split",
"(",
"'\\n'",
")",
"code",
"=",
"None",
"last_def",
"=",
"-",
"1",
"name",
"=",
"None",
"define_wild_re",
"=",
"re",
".",
"compile",
"(",
"'^DEFINE\\s+.*$'",
",",
"re",
".",
"IGNORECASE",
")",
"define_re",
"=",
"re",
".",
"compile",
"(",
"'^DEFINE\\s+QUERY\\s+([A-Z]\\w*)\\s*?(.*)$'",
",",
"re",
".",
"IGNORECASE",
")",
"select_re",
"=",
"re",
".",
"compile",
"(",
"'^SELECT\\s*.*$'",
",",
"re",
".",
"IGNORECASE",
")",
"standard_sql_re",
"=",
"re",
".",
"compile",
"(",
"'^(CREATE|WITH|INSERT|DELETE|UPDATE)\\s*.*$'",
",",
"re",
".",
"IGNORECASE",
")",
"# TODO(gram): a potential issue with this code is if we have leading Python code followed",
"# by a SQL-style comment before we see SELECT/DEFINE. When switching to the tokenizer see",
"# if we can address this.",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"define_match",
"=",
"define_re",
".",
"match",
"(",
"line",
")",
"select_match",
"=",
"select_re",
".",
"match",
"(",
"line",
")",
"standard_sql_match",
"=",
"standard_sql_re",
".",
"match",
"(",
"line",
")",
"if",
"i",
":",
"prior_content",
"=",
"''",
".",
"join",
"(",
"lines",
"[",
":",
"i",
"]",
")",
".",
"strip",
"(",
")",
"if",
"select_match",
":",
"# Avoid matching if previous token was '(' or if Standard SQL is found",
"# TODO: handle the possibility of comments immediately preceding SELECT",
"select_match",
"=",
"len",
"(",
"prior_content",
")",
"==",
"0",
"or",
"(",
"prior_content",
"[",
"-",
"1",
"]",
"!=",
"'('",
"and",
"not",
"standard_sql_re",
".",
"match",
"(",
"prior_content",
")",
")",
"if",
"standard_sql_match",
":",
"standard_sql_match",
"=",
"len",
"(",
"prior_content",
")",
"==",
"0",
"or",
"not",
"standard_sql_re",
".",
"match",
"(",
"prior_content",
")",
"if",
"define_match",
"or",
"select_match",
"or",
"standard_sql_match",
":",
"# If this is the first query, get the preceding Python code.",
"if",
"code",
"is",
"None",
":",
"code",
"=",
"(",
"'\\n'",
".",
"join",
"(",
"lines",
"[",
":",
"i",
"]",
")",
")",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"code",
")",
":",
"code",
"+=",
"'\\n'",
"elif",
"last_def",
">=",
"0",
":",
"# This is not the first query, so gather the previous query text.",
"query",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"line",
"for",
"line",
"in",
"lines",
"[",
"last_def",
":",
"i",
"]",
"if",
"len",
"(",
"line",
")",
"]",
")",
".",
"strip",
"(",
")",
"if",
"select_match",
"and",
"name",
"!=",
"datalab",
".",
"data",
".",
"_utils",
".",
"_SQL_MODULE_MAIN",
"and",
"len",
"(",
"query",
")",
"==",
"0",
":",
"# Avoid DEFINE query name\\nSELECT ... being seen as an empty DEFINE followed by SELECT",
"continue",
"# Save the query",
"statement",
"=",
"datalab",
".",
"data",
".",
"SqlStatement",
"(",
"query",
",",
"module",
")",
"module",
".",
"__dict__",
"[",
"name",
"]",
"=",
"statement",
"# And set the 'last' query to be this too",
"module",
".",
"__dict__",
"[",
"datalab",
".",
"data",
".",
"_utils",
".",
"_SQL_MODULE_LAST",
"]",
"=",
"statement",
"# Get the query name and strip off our syntactic sugar if appropriate.",
"if",
"define_match",
":",
"name",
"=",
"define_match",
".",
"group",
"(",
"1",
")",
"lines",
"[",
"i",
"]",
"=",
"define_match",
".",
"group",
"(",
"2",
")",
"else",
":",
"name",
"=",
"datalab",
".",
"data",
".",
"_utils",
".",
"_SQL_MODULE_MAIN",
"# Save the starting line index of the new query",
"last_def",
"=",
"i",
"else",
":",
"define_wild_match",
"=",
"define_wild_re",
".",
"match",
"(",
"line",
")",
"if",
"define_wild_match",
":",
"raise",
"Exception",
"(",
"'Expected \"DEFINE QUERY <name>\"'",
")",
"if",
"last_def",
">=",
"0",
":",
"# We were in a query so save this tail query.",
"query",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"line",
"for",
"line",
"in",
"lines",
"[",
"last_def",
":",
"]",
"if",
"len",
"(",
"line",
")",
"]",
")",
".",
"strip",
"(",
")",
"statement",
"=",
"datalab",
".",
"data",
".",
"SqlStatement",
"(",
"query",
",",
"module",
")",
"module",
".",
"__dict__",
"[",
"name",
"]",
"=",
"statement",
"module",
".",
"__dict__",
"[",
"datalab",
".",
"data",
".",
"_utils",
".",
"_SQL_MODULE_LAST",
"]",
"=",
"statement",
"if",
"code",
"is",
"None",
":",
"code",
"=",
"''",
"module",
".",
"__dict__",
"[",
"datalab",
".",
"data",
".",
"_utils",
".",
"_SQL_MODULE_ARGPARSE",
"]",
"=",
"_arguments",
"(",
"code",
",",
"module",
")",
"return",
"module",
".",
"__dict__",
".",
"get",
"(",
"datalab",
".",
"data",
".",
"_utils",
".",
"_SQL_MODULE_LAST",
",",
"None",
")"
] | Split a hybrid %%sql cell into the Python code and the queries.
Populates a module with the queries.
Args:
cell: the contents of the %%sql cell.
module: the module that the contents will populate.
Returns:
The default (last) query for the module. | [
"Split",
"a",
"hybrid",
"%%sql",
"cell",
"into",
"the",
"Python",
"code",
"and",
"the",
"queries",
"."
] | train | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/data/commands/_sql.py#L284-L367 | 0.014882 |
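For orientation, the kind of hybrid cell text `_split_cell` expects — leading Python, optional named `DEFINE QUERY` blocks, and a trailing default query (the `$variable` expansion and module binding are Datalab-internal; this string is only an illustration):

```python
cell = '''
max_date = '2016-01-01'

DEFINE QUERY latest
SELECT * FROM logs WHERE date >= $max_date

SELECT COUNT(*) FROM $latest
'''
# _split_cell(cell, module) would bind `latest` and the final SELECT (the
# module's default/last query) onto `module`, and turn the leading Python
# assignment into an argparse argument for the queries.
```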
evhub/coconut | coconut/compiler/compiler.py | Compiler.wrap_str | def wrap_str(self, text, strchar, multiline=False):
"""Wrap a string."""
if multiline:
strchar *= 3
return strwrapper + self.add_ref("str", (text, strchar)) + unwrapper | python | def wrap_str(self, text, strchar, multiline=False):
"""Wrap a string."""
if multiline:
strchar *= 3
return strwrapper + self.add_ref("str", (text, strchar)) + unwrapper | [
"def",
"wrap_str",
"(",
"self",
",",
"text",
",",
"strchar",
",",
"multiline",
"=",
"False",
")",
":",
"if",
"multiline",
":",
"strchar",
"*=",
"3",
"return",
"strwrapper",
"+",
"self",
".",
"add_ref",
"(",
"\"str\"",
",",
"(",
"text",
",",
"strchar",
")",
")",
"+",
"unwrapper"
] | Wrap a string. | [
"Wrap",
"a",
"string",
"."
] | train | https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/compiler.py#L525-L529 | 0.009804 |
liip/taxi | taxi/commands/base.py | date_options | def date_options(func):
"""
Decorator to add support for `--today/--not-today`, `--since` and `--until` options to the given command. The
calculated date is then passed as a parameter named `date`.
"""
@click.option(
'--until', type=Date(), help="Only show entries until the given date."
)
@click.option(
'--since', type=Date(), help="Only show entries starting at the given date.",
)
@click.option(
'--today/--not-today', default=None, help="Only include today's entries (same as --since=today --until=today)"
" or ignore today's entries (same as --until=yesterday)"
)
@functools.wraps(func)
def wrapper(*args, **kwargs):
since, until, today = kwargs.pop('since'), kwargs.pop('until'), kwargs.pop('today')
if today is not None:
if today:
date = datetime.date.today()
else:
date = (None, datetime.date.today() - datetime.timedelta(days=1))
elif since is not None or until is not None:
date = (since, until)
else:
date = None
kwargs['date'] = date
return func(*args, **kwargs)
return wrapper | python | def date_options(func):
"""
Decorator to add support for `--today/--not-today`, `--since` and `--until` options to the given command. The
calculated date is then passed as a parameter named `date`.
"""
@click.option(
'--until', type=Date(), help="Only show entries until the given date."
)
@click.option(
'--since', type=Date(), help="Only show entries starting at the given date.",
)
@click.option(
'--today/--not-today', default=None, help="Only include today's entries (same as --since=today --until=today)"
" or ignore today's entries (same as --until=yesterday)"
)
@functools.wraps(func)
def wrapper(*args, **kwargs):
since, until, today = kwargs.pop('since'), kwargs.pop('until'), kwargs.pop('today')
if today is not None:
if today:
date = datetime.date.today()
else:
date = (None, datetime.date.today() - datetime.timedelta(days=1))
elif since is not None or until is not None:
date = (since, until)
else:
date = None
kwargs['date'] = date
return func(*args, **kwargs)
return wrapper | [
"def",
"date_options",
"(",
"func",
")",
":",
"@",
"click",
".",
"option",
"(",
"'--until'",
",",
"type",
"=",
"Date",
"(",
")",
",",
"help",
"=",
"\"Only show entries until the given date.\"",
")",
"@",
"click",
".",
"option",
"(",
"'--since'",
",",
"type",
"=",
"Date",
"(",
")",
",",
"help",
"=",
"\"Only show entries starting at the given date.\"",
",",
")",
"@",
"click",
".",
"option",
"(",
"'--today/--not-today'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"Only include today's entries (same as --since=today --until=today)\"",
"\" or ignore today's entries (same as --until=yesterday)\"",
")",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"since",
",",
"until",
",",
"today",
"=",
"kwargs",
".",
"pop",
"(",
"'since'",
")",
",",
"kwargs",
".",
"pop",
"(",
"'until'",
")",
",",
"kwargs",
".",
"pop",
"(",
"'today'",
")",
"if",
"today",
"is",
"not",
"None",
":",
"if",
"today",
":",
"date",
"=",
"datetime",
".",
"date",
".",
"today",
"(",
")",
"else",
":",
"date",
"=",
"(",
"None",
",",
"datetime",
".",
"date",
".",
"today",
"(",
")",
"-",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")",
")",
"elif",
"since",
"is",
"not",
"None",
"or",
"until",
"is",
"not",
"None",
":",
"date",
"=",
"(",
"since",
",",
"until",
")",
"else",
":",
"date",
"=",
"None",
"kwargs",
"[",
"'date'",
"]",
"=",
"date",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] | Decorator to add support for `--today/--not-today`, `--since` and `--until` options to the given command. The
calculated date is then passed as a parameter named `date`. | [
"Decorator",
"to",
"add",
"support",
"for",
"--",
"today",
"/",
"--",
"not",
"-",
"today",
"--",
"from",
"and",
"--",
"to",
"options",
"to",
"the",
"given",
"command",
".",
"The",
"calculated",
"date",
"is",
"then",
"passed",
"as",
"a",
"parameter",
"named",
"date",
"."
] | train | https://github.com/liip/taxi/blob/269423c1f1ab571bd01a522819afe3e325bfbff6/taxi/commands/base.py#L191-L224 | 0.004992 |
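How a command might consume the decorator above — `date` arrives as None, a single `datetime.date`, or a `(since, until)` tuple (the command name and body are made up for illustration):

```python
import click
from taxi.commands.base import date_options  # per the record's module path

@click.command()
@date_options
def show(date):
    if date is None:
        click.echo('showing all entries')
    elif isinstance(date, tuple):
        click.echo('showing entries from %s until %s' % date)
    else:
        click.echo('showing entries for %s only' % date)
```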
seleniumbase/SeleniumBase | seleniumbase/fixtures/base_case.py | BaseCase.create_folder | def create_folder(self, folder):
""" Creates a folder of the given name if it doesn't already exist. """
if folder.endswith("/"):
folder = folder[:-1]
if len(folder) < 1:
raise Exception("Minimum folder name length = 1.")
if not os.path.exists(folder):
try:
os.makedirs(folder)
except Exception:
pass | python | def create_folder(self, folder):
""" Creates a folder of the given name if it doesn't already exist. """
if folder.endswith("/"):
folder = folder[:-1]
if len(folder) < 1:
raise Exception("Minimum folder name length = 1.")
if not os.path.exists(folder):
try:
os.makedirs(folder)
except Exception:
pass | [
"def",
"create_folder",
"(",
"self",
",",
"folder",
")",
":",
"if",
"folder",
".",
"endswith",
"(",
"\"/\"",
")",
":",
"folder",
"=",
"folder",
"[",
":",
"-",
"1",
"]",
"if",
"len",
"(",
"folder",
")",
"<",
"1",
":",
"raise",
"Exception",
"(",
"\"Minimum folder name length = 1.\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"folder",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"folder",
")",
"except",
"Exception",
":",
"pass"
] | Creates a folder of the given name if it doesn't already exist. | [
"Creates",
"a",
"folder",
"of",
"the",
"given",
"name",
"if",
"it",
"doesn",
"t",
"already",
"exist",
"."
] | train | https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/base_case.py#L1683-L1693 | 0.004854 |
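On Python 3 the same contract fits in one stdlib call, which also avoids the race between the `os.path.exists()` check and `os.makedirs()` in the version above (a sketch, not the SeleniumBase API):

```python
import os

def create_folder(folder):
    folder = folder.rstrip("/")  # the original strips a single trailing slash
    if len(folder) < 1:
        raise Exception("Minimum folder name length = 1.")
    os.makedirs(folder, exist_ok=True)  # no exists()/makedirs() race
```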
openstack/networking-arista | networking_arista/ml2/mechanism_arista.py | AristaDriver.get_instance_type | def get_instance_type(self, port):
"""Determine the port type based on device owner and vnic type"""
if port[portbindings.VNIC_TYPE] == portbindings.VNIC_BAREMETAL:
return a_const.BAREMETAL_RESOURCE
owner_to_type = {
n_const.DEVICE_OWNER_DHCP: a_const.DHCP_RESOURCE,
n_const.DEVICE_OWNER_DVR_INTERFACE: a_const.ROUTER_RESOURCE,
trunk_consts.TRUNK_SUBPORT_OWNER: a_const.VM_RESOURCE}
if port['device_owner'] in owner_to_type.keys():
return owner_to_type[port['device_owner']]
elif port['device_owner'].startswith(
n_const.DEVICE_OWNER_COMPUTE_PREFIX):
return a_const.VM_RESOURCE
return None | python | def get_instance_type(self, port):
"""Determine the port type based on device owner and vnic type"""
if port[portbindings.VNIC_TYPE] == portbindings.VNIC_BAREMETAL:
return a_const.BAREMETAL_RESOURCE
owner_to_type = {
n_const.DEVICE_OWNER_DHCP: a_const.DHCP_RESOURCE,
n_const.DEVICE_OWNER_DVR_INTERFACE: a_const.ROUTER_RESOURCE,
trunk_consts.TRUNK_SUBPORT_OWNER: a_const.VM_RESOURCE}
if port['device_owner'] in owner_to_type.keys():
return owner_to_type[port['device_owner']]
elif port['device_owner'].startswith(
n_const.DEVICE_OWNER_COMPUTE_PREFIX):
return a_const.VM_RESOURCE
return None | [
"def",
"get_instance_type",
"(",
"self",
",",
"port",
")",
":",
"if",
"port",
"[",
"portbindings",
".",
"VNIC_TYPE",
"]",
"==",
"portbindings",
".",
"VNIC_BAREMETAL",
":",
"return",
"a_const",
".",
"BAREMETAL_RESOURCE",
"owner_to_type",
"=",
"{",
"n_const",
".",
"DEVICE_OWNER_DHCP",
":",
"a_const",
".",
"DHCP_RESOURCE",
",",
"n_const",
".",
"DEVICE_OWNER_DVR_INTERFACE",
":",
"a_const",
".",
"ROUTER_RESOURCE",
",",
"trunk_consts",
".",
"TRUNK_SUBPORT_OWNER",
":",
"a_const",
".",
"VM_RESOURCE",
"}",
"if",
"port",
"[",
"'device_owner'",
"]",
"in",
"owner_to_type",
".",
"keys",
"(",
")",
":",
"return",
"owner_to_type",
"[",
"port",
"[",
"'device_owner'",
"]",
"]",
"elif",
"port",
"[",
"'device_owner'",
"]",
".",
"startswith",
"(",
"n_const",
".",
"DEVICE_OWNER_COMPUTE_PREFIX",
")",
":",
"return",
"a_const",
".",
"VM_RESOURCE",
"return",
"None"
] | Determine the port type based on device owner and vnic type | [
"Determine",
"the",
"port",
"type",
"based",
"on",
"device",
"owner",
"and",
"vnic",
"type"
] | train | https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L117-L130 | 0.002759 |
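The dispatch order above in miniature — VNIC type first, then exact device-owner matches, then the compute prefix (the constants below are stand-ins for the neutron values, not the real imports):

```python
# Stand-in constants; the real values come from neutron / networking-arista.
VNIC_BAREMETAL = 'baremetal'
OWNER_TO_TYPE = {
    'network:dhcp': 'dhcp',
    'network:router_interface_distributed': 'router',
    'trunk:subport': 'vm',
}
COMPUTE_PREFIX = 'compute:'

def get_instance_type(port):
    if port.get('binding:vnic_type') == VNIC_BAREMETAL:
        return 'baremetal'
    owner = port['device_owner']
    if owner in OWNER_TO_TYPE:
        return OWNER_TO_TYPE[owner]
    if owner.startswith(COMPUTE_PREFIX):
        return 'vm'
    return None

assert get_instance_type({'binding:vnic_type': 'normal',
                          'device_owner': 'compute:nova'}) == 'vm'
```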
jim-easterbrook/pyctools | src/pyctools/core/compound.py | Compound.output_connections | def output_connections(self, name):
"""Yield ordered list of connections to one child.
Each result is a ((component, output), (component, input)) tuple.
:param string name: the component whose output connections are
wanted.
"""
for output_name in self._compound_children[name].outputs:
src = name, output_name
if src in self._compound_linkages:
dests = self._compound_linkages[src]
if isinstance(dests[0], six.string_types):
dests = zip(dests[0::2], dests[1::2])
for dest in dests:
yield src, dest | python | def output_connections(self, name):
"""Yield ordered list of connections to one child.
Each result is a ((component, output), (component, input)) tuple.
:param string name: the component whose output connections are
wanted.
"""
for output_name in self._compound_children[name].outputs:
src = name, output_name
if src in self._compound_linkages:
dests = self._compound_linkages[src]
if isinstance(dests[0], six.string_types):
dests = zip(dests[0::2], dests[1::2])
for dest in dests:
yield src, dest | [
"def",
"output_connections",
"(",
"self",
",",
"name",
")",
":",
"for",
"output_name",
"in",
"self",
".",
"_compound_children",
"[",
"name",
"]",
".",
"outputs",
":",
"src",
"=",
"name",
",",
"output_name",
"if",
"src",
"in",
"self",
".",
"_compound_linkages",
":",
"dests",
"=",
"self",
".",
"_compound_linkages",
"[",
"src",
"]",
"if",
"isinstance",
"(",
"dests",
"[",
"0",
"]",
",",
"six",
".",
"string_types",
")",
":",
"dests",
"=",
"zip",
"(",
"dests",
"[",
"0",
":",
":",
"2",
"]",
",",
"dests",
"[",
"1",
":",
":",
"2",
"]",
")",
"for",
"dest",
"in",
"dests",
":",
"yield",
"src",
",",
"dest"
] | Yield ordered list of connections to one child.
Each result is a ((component, output), (component, input)) tuple.
:param string name: the component whose output connections are
wanted. | [
"Yield",
"ordered",
"list",
"of",
"connections",
"to",
"one",
"child",
"."
] | train | https://github.com/jim-easterbrook/pyctools/blob/2a958665326892f45f249bebe62c2c23f306732b/src/pyctools/core/compound.py#L258-L274 | 0.003012 |
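Since each yielded item is a ((component, output), (component, input)) pair, consuming the generator is two tuple unpacks (a hypothetical helper; `compound` and the child name are assumptions):

```python
def print_wiring(compound, child_name):
    """Print where each output of `child_name` is routed (hypothetical helper)."""
    for (src, out_name), (dst, in_name) in compound.output_connections(child_name):
        print('%s.%s -> %s.%s' % (src, out_name, dst, in_name))
```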
msiedlarek/wiring | wiring/interface.py | add_implemented_interfaces | def add_implemented_interfaces(cls, interfaces):
"""
Adds :term:`interfaces <interface>` to those already declared as
implemented by class `cls`.
"""
implemented = set(
six.moves.reduce(
lambda x, y: x.union(y),
map(operator.attrgetter('implied'), interfaces),
set()
)
)
implemented.update(*map(
get_implemented_interfaces,
inspect.getmro(cls)
))
setattr(cls, '__interfaces__', frozenset(implemented)) | python | def add_implemented_interfaces(cls, interfaces):
"""
Adds :term:`interfaces <interface>` to those already declared as
implemented by class `cls`.
"""
implemented = set(
six.moves.reduce(
lambda x, y: x.union(y),
map(operator.attrgetter('implied'), interfaces),
set()
)
)
implemented.update(*map(
get_implemented_interfaces,
inspect.getmro(cls)
))
setattr(cls, '__interfaces__', frozenset(implemented)) | [
"def",
"add_implemented_interfaces",
"(",
"cls",
",",
"interfaces",
")",
":",
"implemented",
"=",
"set",
"(",
"six",
".",
"moves",
".",
"reduce",
"(",
"lambda",
"x",
",",
"y",
":",
"x",
".",
"union",
"(",
"y",
")",
",",
"map",
"(",
"operator",
".",
"attrgetter",
"(",
"'implied'",
")",
",",
"interfaces",
")",
",",
"set",
"(",
")",
")",
")",
"implemented",
".",
"update",
"(",
"*",
"map",
"(",
"get_implemented_interfaces",
",",
"inspect",
".",
"getmro",
"(",
"cls",
")",
")",
")",
"setattr",
"(",
"cls",
",",
"'__interfaces__'",
",",
"frozenset",
"(",
"implemented",
")",
")"
] | Adds :term:`interfaces <interface>` to those already declared as
implemented by class `cls`. | [
"Adds",
":",
"term",
":",
"interfaces",
"<interface",
">",
"to",
"those",
"already",
"declared",
"as",
"implemented",
"by",
"class",
"cls",
"."
] | train | https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/interface.py#L322-L338 | 0.00198 |
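The reduce/union step above, isolated — gathering the implied closure of a list of interfaces (`FakeInterface` is a minimal stand-in for wiring's Interface objects):

```python
import functools
import operator

class FakeInterface(object):
    def __init__(self, implied):
        # An interface implies itself plus everything it extends.
        self.implied = frozenset(implied) | {self}

A = FakeInterface([])
B = FakeInterface([A])  # B implies A

implied = functools.reduce(
    lambda x, y: x.union(y),
    map(operator.attrgetter('implied'), [B]),
    set(),
)
assert implied == {A, B}
```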
pydata/xarray | xarray/backends/file_manager.py | CachingFileManager.close | def close(self, needs_lock=True):
"""Explicitly close any associated file object (if necessary)."""
# TODO: remove needs_lock if/when we have a reentrant lock in
# dask.distributed: https://github.com/dask/dask/issues/3832
with self._optional_lock(needs_lock):
default = None
file = self._cache.pop(self._key, default)
if file is not None:
file.close() | python | def close(self, needs_lock=True):
"""Explicitly close any associated file object (if necessary)."""
# TODO: remove needs_lock if/when we have a reentrant lock in
# dask.distributed: https://github.com/dask/dask/issues/3832
with self._optional_lock(needs_lock):
default = None
file = self._cache.pop(self._key, default)
if file is not None:
file.close() | [
"def",
"close",
"(",
"self",
",",
"needs_lock",
"=",
"True",
")",
":",
"# TODO: remove needs_lock if/when we have a reentrant lock in",
"# dask.distributed: https://github.com/dask/dask/issues/3832",
"with",
"self",
".",
"_optional_lock",
"(",
"needs_lock",
")",
":",
"default",
"=",
"None",
"file",
"=",
"self",
".",
"_cache",
".",
"pop",
"(",
"self",
".",
"_key",
",",
"default",
")",
"if",
"file",
"is",
"not",
"None",
":",
"file",
".",
"close",
"(",
")"
] | Explicitly close any associated file object (if necessary). | [
"Explicitly",
"close",
"any",
"associated",
"file",
"object",
"(",
"if",
"necessary",
")",
"."
] | train | https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/backends/file_manager.py#L180-L188 | 0.004587 |
pjuren/pyokit | src/pyokit/datastruct/read.py | NGSRead.to_fastq_str | def to_fastq_str(self):
"""
:return: string representation of this NGS read in FastQ format
"""
return "@" + self.name + "\n" + self.sequenceData +\
"\n" + "+" + self.name + "\n" + self.seq_qual | python | def to_fastq_str(self):
"""
:return: string representation of this NGS read in FastQ format
"""
return "@" + self.name + "\n" + self.sequenceData +\
"\n" + "+" + self.name + "\n" + self.seq_qual | [
"def",
"to_fastq_str",
"(",
"self",
")",
":",
"return",
"\"@\"",
"+",
"self",
".",
"name",
"+",
"\"\\n\"",
"+",
"self",
".",
"sequenceData",
"+",
"\"\\n\"",
"+",
"\"+\"",
"+",
"self",
".",
"name",
"+",
"\"\\n\"",
"+",
"self",
".",
"seq_qual"
] | :return: string representation of this NGS read in FastQ format | [
":",
"return",
":",
"string",
"representation",
"of",
"this",
"NGS",
"read",
"in",
"FastQ",
"format"
] | train | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/read.py#L264-L269 | 0.004525 |
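The method emits the standard four-line FastQ record. A stand-in object makes the layout concrete (not the pyokit class itself):

```python
class Read(object):
    name, sequenceData, seq_qual = "read1", "ACGT", "IIII"

    def to_fastq_str(self):
        return "@" + self.name + "\n" + self.sequenceData +\
               "\n" + "+" + self.name + "\n" + self.seq_qual

print(Read().to_fastq_str())
# @read1
# ACGT
# +read1
# IIII
```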
postlund/pyatv | pyatv/airplay/srp.py | new_credentials | def new_credentials():
"""Generate a new identifier and seed for authentication.
Use the returned values in the following way:
* The identifier shall be passed as username to SRPAuthHandler.step1
* Seed shall be passed to SRPAuthHandler constructor
"""
identifier = binascii.b2a_hex(os.urandom(8)).decode().upper()
seed = binascii.b2a_hex(os.urandom(32)) # Corresponds to private key
return identifier, seed | python | def new_credentials():
"""Generate a new identifier and seed for authentication.
Use the returned values in the following way:
* The identifier shall be passed as username to SRPAuthHandler.step1
* Seed shall be passed to SRPAuthHandler constructor
"""
identifier = binascii.b2a_hex(os.urandom(8)).decode().upper()
seed = binascii.b2a_hex(os.urandom(32)) # Corresponds to private key
return identifier, seed | [
"def",
"new_credentials",
"(",
")",
":",
"identifier",
"=",
"binascii",
".",
"b2a_hex",
"(",
"os",
".",
"urandom",
"(",
"8",
")",
")",
".",
"decode",
"(",
")",
".",
"upper",
"(",
")",
"seed",
"=",
"binascii",
".",
"b2a_hex",
"(",
"os",
".",
"urandom",
"(",
"32",
")",
")",
"# Corresponds to private key",
"return",
"identifier",
",",
"seed"
] | Generate a new identifier and seed for authentication.
Use the returned values in the following way:
* The identifier shall be passed as username to SRPAuthHandler.step1
* Seed shall be passed to SRPAuthHandler constructor | [
"Generate",
"a",
"new",
"identifier",
"and",
"seed",
"for",
"authentication",
"."
] | train | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/airplay/srp.py#L48-L57 | 0.002268 |
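Usage per the docstring above — the identifier is 16 uppercase hex characters (8 random bytes) and the seed is 64 hex characters (32 random bytes, the private key); `SRPAuthHandler` itself is referenced, not defined here:

```python
from pyatv.airplay.srp import new_credentials  # per the record's module path

identifier, seed = new_credentials()
assert len(identifier) == 16   # -> pass as username to SRPAuthHandler.step1
assert len(seed) == 64         # -> pass to the SRPAuthHandler constructor
```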
pyviz/param | param/ipython.py | ParamPager._build_table | def _build_table(self, info, order, max_col_len=40, only_changed=False):
"""
Collect the information about parameters needed to build a
properly formatted table and then tabulate it.
"""
info_dict, bounds_dict = {}, {}
(params, val_dict, changed) = info
col_widths = dict((k,0) for k in order)
for name, p in params.items():
if only_changed and not (name in changed):
continue
constant = 'C' if p.constant else 'V'
readonly = 'RO' if p.readonly else 'RW'
allow_None = ' AN' if hasattr(p, 'allow_None') and p.allow_None else ''
mode = '%s %s%s' % (constant, readonly, allow_None)
info_dict[name] = {'name': name, 'type':p.__class__.__name__,
'mode':mode}
if hasattr(p, 'bounds'):
lbound, ubound = (None,None) if p.bounds is None else p.bounds
mark_lbound, mark_ubound = False, False
# Use soft_bounds when bounds not defined.
if hasattr(p, 'get_soft_bounds'):
soft_lbound, soft_ubound = p.get_soft_bounds()
if lbound is None and soft_lbound is not None:
lbound = soft_lbound
mark_lbound = True
if ubound is None and soft_ubound is not None:
ubound = soft_ubound
mark_ubound = True
if (lbound, ubound) != (None,None):
bounds_dict[name] = (mark_lbound, mark_ubound)
info_dict[name]['bounds'] = '(%s, %s)' % (lbound, ubound)
value = repr(val_dict[name])
if len(value) > (max_col_len - 3):
value = value[:max_col_len-3] + '...'
info_dict[name]['value'] = value
for col in info_dict[name]:
max_width = max([col_widths[col], len(info_dict[name][col])])
col_widths[col] = max_width
return self._tabulate(info_dict, col_widths, changed, order, bounds_dict) | python | def _build_table(self, info, order, max_col_len=40, only_changed=False):
"""
Collect the information about parameters needed to build a
properly formatted table and then tabulate it.
"""
info_dict, bounds_dict = {}, {}
(params, val_dict, changed) = info
col_widths = dict((k,0) for k in order)
for name, p in params.items():
if only_changed and not (name in changed):
continue
constant = 'C' if p.constant else 'V'
readonly = 'RO' if p.readonly else 'RW'
allow_None = ' AN' if hasattr(p, 'allow_None') and p.allow_None else ''
mode = '%s %s%s' % (constant, readonly, allow_None)
info_dict[name] = {'name': name, 'type':p.__class__.__name__,
'mode':mode}
if hasattr(p, 'bounds'):
lbound, ubound = (None,None) if p.bounds is None else p.bounds
mark_lbound, mark_ubound = False, False
# Use soft_bounds when bounds not defined.
if hasattr(p, 'get_soft_bounds'):
soft_lbound, soft_ubound = p.get_soft_bounds()
if lbound is None and soft_lbound is not None:
lbound = soft_lbound
mark_lbound = True
if ubound is None and soft_ubound is not None:
ubound = soft_ubound
mark_ubound = True
if (lbound, ubound) != (None,None):
bounds_dict[name] = (mark_lbound, mark_ubound)
info_dict[name]['bounds'] = '(%s, %s)' % (lbound, ubound)
value = repr(val_dict[name])
if len(value) > (max_col_len - 3):
value = value[:max_col_len-3] + '...'
info_dict[name]['value'] = value
for col in info_dict[name]:
max_width = max([col_widths[col], len(info_dict[name][col])])
col_widths[col] = max_width
return self._tabulate(info_dict, col_widths, changed, order, bounds_dict) | [
"def",
"_build_table",
"(",
"self",
",",
"info",
",",
"order",
",",
"max_col_len",
"=",
"40",
",",
"only_changed",
"=",
"False",
")",
":",
"info_dict",
",",
"bounds_dict",
"=",
"{",
"}",
",",
"{",
"}",
"(",
"params",
",",
"val_dict",
",",
"changed",
")",
"=",
"info",
"col_widths",
"=",
"dict",
"(",
"(",
"k",
",",
"0",
")",
"for",
"k",
"in",
"order",
")",
"for",
"name",
",",
"p",
"in",
"params",
".",
"items",
"(",
")",
":",
"if",
"only_changed",
"and",
"not",
"(",
"name",
"in",
"changed",
")",
":",
"continue",
"constant",
"=",
"'C'",
"if",
"p",
".",
"constant",
"else",
"'V'",
"readonly",
"=",
"'RO'",
"if",
"p",
".",
"readonly",
"else",
"'RW'",
"allow_None",
"=",
"' AN'",
"if",
"hasattr",
"(",
"p",
",",
"'allow_None'",
")",
"and",
"p",
".",
"allow_None",
"else",
"''",
"mode",
"=",
"'%s %s%s'",
"%",
"(",
"constant",
",",
"readonly",
",",
"allow_None",
")",
"info_dict",
"[",
"name",
"]",
"=",
"{",
"'name'",
":",
"name",
",",
"'type'",
":",
"p",
".",
"__class__",
".",
"__name__",
",",
"'mode'",
":",
"mode",
"}",
"if",
"hasattr",
"(",
"p",
",",
"'bounds'",
")",
":",
"lbound",
",",
"ubound",
"=",
"(",
"None",
",",
"None",
")",
"if",
"p",
".",
"bounds",
"is",
"None",
"else",
"p",
".",
"bounds",
"mark_lbound",
",",
"mark_ubound",
"=",
"False",
",",
"False",
"# Use soft_bounds when bounds not defined.",
"if",
"hasattr",
"(",
"p",
",",
"'get_soft_bounds'",
")",
":",
"soft_lbound",
",",
"soft_ubound",
"=",
"p",
".",
"get_soft_bounds",
"(",
")",
"if",
"lbound",
"is",
"None",
"and",
"soft_lbound",
"is",
"not",
"None",
":",
"lbound",
"=",
"soft_lbound",
"mark_lbound",
"=",
"True",
"if",
"ubound",
"is",
"None",
"and",
"soft_ubound",
"is",
"not",
"None",
":",
"ubound",
"=",
"soft_ubound",
"mark_ubound",
"=",
"True",
"if",
"(",
"lbound",
",",
"ubound",
")",
"!=",
"(",
"None",
",",
"None",
")",
":",
"bounds_dict",
"[",
"name",
"]",
"=",
"(",
"mark_lbound",
",",
"mark_ubound",
")",
"info_dict",
"[",
"name",
"]",
"[",
"'bounds'",
"]",
"=",
"'(%s, %s)'",
"%",
"(",
"lbound",
",",
"ubound",
")",
"value",
"=",
"repr",
"(",
"val_dict",
"[",
"name",
"]",
")",
"if",
"len",
"(",
"value",
")",
">",
"(",
"max_col_len",
"-",
"3",
")",
":",
"value",
"=",
"value",
"[",
":",
"max_col_len",
"-",
"3",
"]",
"+",
"'...'",
"info_dict",
"[",
"name",
"]",
"[",
"'value'",
"]",
"=",
"value",
"for",
"col",
"in",
"info_dict",
"[",
"name",
"]",
":",
"max_width",
"=",
"max",
"(",
"[",
"col_widths",
"[",
"col",
"]",
",",
"len",
"(",
"info_dict",
"[",
"name",
"]",
"[",
"col",
"]",
")",
"]",
")",
"col_widths",
"[",
"col",
"]",
"=",
"max_width",
"return",
"self",
".",
"_tabulate",
"(",
"info_dict",
",",
"col_widths",
",",
"changed",
",",
"order",
",",
"bounds_dict",
")"
] | Collect the information about parameters needed to build a
properly formatted table and then tabulate it. | [
"Collect",
"the",
"information",
"about",
"parameters",
"needed",
"to",
"build",
"a",
"properly",
"formatted",
"table",
"and",
"then",
"tabulate",
"it",
"."
] | train | https://github.com/pyviz/param/blob/8f0dafa78defa883247b40635f96cc6d5c1b3481/param/ipython.py#L127-L176 | 0.004221 |
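A standalone sketch of two per-parameter rules `_build_table` applies above, using nothing beyond what the code itself shows: the mode string built from the constant/readonly/allow_None flags, and repr values truncated to `max_col_len`.
max_col_len = 40

def mode_string(constant, readonly, allow_None):
    # 'C'/'V' for constant, 'RO'/'RW' for readonly, optional ' AN' suffix.
    return '%s %s%s' % ('C' if constant else 'V',
                        'RO' if readonly else 'RW',
                        ' AN' if allow_None else '')

value = repr(list(range(30)))
if len(value) > (max_col_len - 3):
    value = value[:max_col_len - 3] + '...'   # same truncation as above

print(mode_string(True, False, True))  # -> 'C RW AN'
print(value)                           # truncated repr ending in '...'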
m110/climb | climb/core.py | Climb.execute | def execute(self, *args):
"""Executes single command and returns result."""
command, kwargs = self.parse(*args)
return self._commands.execute(command, **kwargs) | python | def execute(self, *args):
"""Executes single command and returns result."""
command, kwargs = self.parse(*args)
return self._commands.execute(command, **kwargs) | [
"def",
"execute",
"(",
"self",
",",
"*",
"args",
")",
":",
"command",
",",
"kwargs",
"=",
"self",
".",
"parse",
"(",
"*",
"args",
")",
"return",
"self",
".",
"_commands",
".",
"execute",
"(",
"command",
",",
"*",
"*",
"kwargs",
")"
] | Executes single command and returns result. | [
"Executes",
"single",
"command",
"and",
"returns",
"result",
"."
] | train | https://github.com/m110/climb/blob/0a35dfb94df48f85963490fbe0514c2ea80bff34/climb/core.py#L76-L79 | 0.01087 |
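A toy stand-in for the parse-then-dispatch flow of `Climb.execute`; the parse and dispatch bodies here are illustrative assumptions, not climb's real implementation.
class ToyClimb:
    def parse(self, *args):
        # First token is the command; the rest become keyword arguments.
        return args[0], dict(a.split('=', 1) for a in args[1:])

    def execute(self, *args):
        command, kwargs = self.parse(*args)
        return command, kwargs  # a real Climb dispatches via its command registry

print(ToyClimb().execute('deploy', 'env=prod'))  # ('deploy', {'env': 'prod'})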
captin411/ofxclient | ofxclient/account.py | Account.local_id | def local_id(self):
"""Locally generated unique account identifier.
:rtype: string
"""
return hashlib.sha256(("%s%s" % (
self.institution.local_id(),
self.number)).encode()).hexdigest() | python | def local_id(self):
"""Locally generated unique account identifier.
:rtype: string
"""
return hashlib.sha256(("%s%s" % (
self.institution.local_id(),
self.number)).encode()).hexdigest() | [
"def",
"local_id",
"(",
"self",
")",
":",
"return",
"hashlib",
".",
"sha256",
"(",
"(",
"\"%s%s\"",
"%",
"(",
"self",
".",
"institution",
".",
"local_id",
"(",
")",
",",
"self",
".",
"number",
")",
")",
".",
"encode",
"(",
")",
")",
".",
"hexdigest",
"(",
")"
] | Locally generated unique account identifier.
:rtype: string | [
"Locally",
"generated",
"unique",
"account",
"identifier",
"."
] | train | https://github.com/captin411/ofxclient/blob/4da2719f0ecbbf5eee62fb82c1b3b34ec955ee5e/ofxclient/account.py#L67-L74 | 0.008264 |
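The scheme above is easy to verify standalone: the id is the SHA-256 hex digest of the institution's local id concatenated with the account number, so equal inputs always yield the same 64-character value. The institution id and account number below are made up.
import hashlib

def local_id(institution_local_id, number):
    return hashlib.sha256(
        ("%s%s" % (institution_local_id, number)).encode()).hexdigest()

a = local_id("inst-abc", "12345678")
b = local_id("inst-abc", "12345678")
assert a == b and len(a) == 64  # deterministic 64-char hex digest
print(a)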
saltstack/salt | salt/utils/environment.py | get_module_environment | def get_module_environment(env=None, function=None):
'''
Get module optional environment.
To set up an environment option for a particular module,
add either pillar or config at the minion as follows:
system-environment:
modules:
pkg:
_:
LC_ALL: en_GB.UTF-8
FOO: bar
install:
HELLO: world
states:
pkg:
_:
LC_ALL: en_US.Latin-1
NAME: Fred
So this will export the environment to all the modules,
states, returners etc. And calling this function with the globals()
in that context will fetch the environment for further reuse.
Underscore '_' exports environment for all functions within the module.
If you want to specifically export environment only for one function,
specify it as in the example above "install".
Configuration is fetched first: the virtual name goes first, then the
physical module name overrides the virtual settings. Pillar settings
then override the configuration in the same order.
:param env:
:param function: name of a particular function
:return: dict
'''
result = {}
if not env:
env = {}
for env_src in [env.get('__opts__', {}), env.get('__pillar__', {})]:
fname = env.get('__file__', '')
physical_name = os.path.basename(fname).split('.')[0]
section = os.path.basename(os.path.dirname(fname))
m_names = [env.get('__virtualname__')]
if physical_name not in m_names:
m_names.append(physical_name)
for m_name in m_names:
if not m_name:
continue
result.update(env_src.get('system-environment', {}).get(
section, {}).get(m_name, {}).get('_', {}).copy())
if function is not None:
result.update(env_src.get('system-environment', {}).get(
section, {}).get(m_name, {}).get(function, {}).copy())
return result | python | def get_module_environment(env=None, function=None):
'''
Get module optional environment.
To set up an environment option for a particular module,
add either pillar or config at the minion as follows:
system-environment:
modules:
pkg:
_:
LC_ALL: en_GB.UTF-8
FOO: bar
install:
HELLO: world
states:
pkg:
_:
LC_ALL: en_US.Latin-1
NAME: Fred
So this will export the environment to all the modules,
states, returners etc. And calling this function with the globals()
in that context will fetch the environment for further reuse.
Underscore '_' exports environment for all functions within the module.
If you want to specifically export environment only for one function,
specify it as in the example above "install".
Configuration is fetched first: the virtual name goes first, then the
physical module name overrides the virtual settings. Pillar settings
then override the configuration in the same order.
:param env:
:param function: name of a particular function
:return: dict
'''
result = {}
if not env:
env = {}
for env_src in [env.get('__opts__', {}), env.get('__pillar__', {})]:
fname = env.get('__file__', '')
physical_name = os.path.basename(fname).split('.')[0]
section = os.path.basename(os.path.dirname(fname))
m_names = [env.get('__virtualname__')]
if physical_name not in m_names:
m_names.append(physical_name)
for m_name in m_names:
if not m_name:
continue
result.update(env_src.get('system-environment', {}).get(
section, {}).get(m_name, {}).get('_', {}).copy())
if function is not None:
result.update(env_src.get('system-environment', {}).get(
section, {}).get(m_name, {}).get(function, {}).copy())
return result | [
"def",
"get_module_environment",
"(",
"env",
"=",
"None",
",",
"function",
"=",
"None",
")",
":",
"result",
"=",
"{",
"}",
"if",
"not",
"env",
":",
"env",
"=",
"{",
"}",
"for",
"env_src",
"in",
"[",
"env",
".",
"get",
"(",
"'__opts__'",
",",
"{",
"}",
")",
",",
"env",
".",
"get",
"(",
"'__pillar__'",
",",
"{",
"}",
")",
"]",
":",
"fname",
"=",
"env",
".",
"get",
"(",
"'__file__'",
",",
"''",
")",
"physical_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fname",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"section",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"fname",
")",
")",
"m_names",
"=",
"[",
"env",
".",
"get",
"(",
"'__virtualname__'",
")",
"]",
"if",
"physical_name",
"not",
"in",
"m_names",
":",
"m_names",
".",
"append",
"(",
"physical_name",
")",
"for",
"m_name",
"in",
"m_names",
":",
"if",
"not",
"m_name",
":",
"continue",
"result",
".",
"update",
"(",
"env_src",
".",
"get",
"(",
"'system-environment'",
",",
"{",
"}",
")",
".",
"get",
"(",
"section",
",",
"{",
"}",
")",
".",
"get",
"(",
"m_name",
",",
"{",
"}",
")",
".",
"get",
"(",
"'_'",
",",
"{",
"}",
")",
".",
"copy",
"(",
")",
")",
"if",
"function",
"is",
"not",
"None",
":",
"result",
".",
"update",
"(",
"env_src",
".",
"get",
"(",
"'system-environment'",
",",
"{",
"}",
")",
".",
"get",
"(",
"section",
",",
"{",
"}",
")",
".",
"get",
"(",
"m_name",
",",
"{",
"}",
")",
".",
"get",
"(",
"function",
",",
"{",
"}",
")",
".",
"copy",
"(",
")",
")",
"return",
"result"
] | Get module optional environment.
To set up an environment option for a particular module,
add either pillar or config at the minion as follows:
system-environment:
modules:
pkg:
_:
LC_ALL: en_GB.UTF-8
FOO: bar
install:
HELLO: world
states:
pkg:
_:
LC_ALL: en_US.Latin-1
NAME: Fred
So this will export the environment to all the modules,
states, returners etc. And calling this function with the globals()
in that context will fetch the environment for further reuse.
Underscore '_' exports environment for all functions within the module.
If you want to specifically export environment only for one function,
specify it as in the example above "install".
Configuration is fetched first: the virtual name goes first, then the
physical module name overrides the virtual settings. Pillar settings
then override the configuration in the same order.
:param env:
:param function: name of a particular function
:return: dict | [
"Get",
"module",
"optional",
"environment",
"."
] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/environment.py#L9-L65 | 0.000494 |
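A hand-built call against the function above (assumed in scope); the file path and pillar payload are illustrative, mirroring the docstring's pkg example.
env = {
    '__file__': '/usr/lib/salt/modules/pkg.py',  # section='modules', name='pkg'
    '__virtualname__': 'pkg',
    '__opts__': {},
    '__pillar__': {
        'system-environment': {
            'modules': {'pkg': {'_': {'LC_ALL': 'en_GB.UTF-8'},
                                'install': {'HELLO': 'world'}}},
        },
    },
}
print(get_module_environment(env))              # {'LC_ALL': 'en_GB.UTF-8'}
print(get_module_environment(env, 'install'))   # adds {'HELLO': 'world'}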
fastai/fastai | fastai/vision/gan.py | GANTrainer.on_backward_begin | def on_backward_begin(self, last_loss, last_output, **kwargs):
"Record `last_loss` in the proper list."
last_loss = last_loss.detach().cpu()
if self.gen_mode:
self.smoothenerG.add_value(last_loss)
self.glosses.append(self.smoothenerG.smooth)
self.last_gen = last_output.detach().cpu()
else:
self.smoothenerC.add_value(last_loss)
self.closses.append(self.smoothenerC.smooth) | python | def on_backward_begin(self, last_loss, last_output, **kwargs):
"Record `last_loss` in the proper list."
last_loss = last_loss.detach().cpu()
if self.gen_mode:
self.smoothenerG.add_value(last_loss)
self.glosses.append(self.smoothenerG.smooth)
self.last_gen = last_output.detach().cpu()
else:
self.smoothenerC.add_value(last_loss)
self.closses.append(self.smoothenerC.smooth) | [
"def",
"on_backward_begin",
"(",
"self",
",",
"last_loss",
",",
"last_output",
",",
"*",
"*",
"kwargs",
")",
":",
"last_loss",
"=",
"last_loss",
".",
"detach",
"(",
")",
".",
"cpu",
"(",
")",
"if",
"self",
".",
"gen_mode",
":",
"self",
".",
"smoothenerG",
".",
"add_value",
"(",
"last_loss",
")",
"self",
".",
"glosses",
".",
"append",
"(",
"self",
".",
"smoothenerG",
".",
"smooth",
")",
"self",
".",
"last_gen",
"=",
"last_output",
".",
"detach",
"(",
")",
".",
"cpu",
"(",
")",
"else",
":",
"self",
".",
"smoothenerC",
".",
"add_value",
"(",
"last_loss",
")",
"self",
".",
"closses",
".",
"append",
"(",
"self",
".",
"smoothenerC",
".",
"smooth",
")"
] | Record `last_loss` in the proper list. | [
"Record",
"last_loss",
"in",
"the",
"proper",
"list",
"."
] | train | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/gan.py#L116-L125 | 0.004301 |
ploneintranet/ploneintranet.workspace | src/ploneintranet/workspace/browser/views.py | SharingView.role_settings | def role_settings(self):
""" Filter out unwanted to show groups """
result = super(SharingView, self).role_settings()
uid = self.context.UID()
filter_func = lambda x: not any((
x["id"].endswith(uid),
x["id"] == "AuthenticatedUsers",
x["id"] == INTRANET_USERS_GROUP_ID,
))
return filter(filter_func, result) | python | def role_settings(self):
""" Filter out unwanted to show groups """
result = super(SharingView, self).role_settings()
uid = self.context.UID()
filter_func = lambda x: not any((
x["id"].endswith(uid),
x["id"] == "AuthenticatedUsers",
x["id"] == INTRANET_USERS_GROUP_ID,
))
return filter(filter_func, result) | [
"def",
"role_settings",
"(",
"self",
")",
":",
"result",
"=",
"super",
"(",
"SharingView",
",",
"self",
")",
".",
"role_settings",
"(",
")",
"uid",
"=",
"self",
".",
"context",
".",
"UID",
"(",
")",
"filter_func",
"=",
"lambda",
"x",
":",
"not",
"any",
"(",
"(",
"x",
"[",
"\"id\"",
"]",
".",
"endswith",
"(",
"uid",
")",
",",
"x",
"[",
"\"id\"",
"]",
"==",
"\"AuthenticatedUsers\"",
",",
"x",
"[",
"\"id\"",
"]",
"==",
"INTRANET_USERS_GROUP_ID",
",",
")",
")",
"return",
"filter",
"(",
"filter_func",
",",
"result",
")"
] | Filter out groups that should not be shown | [
"Filter",
"out",
"unwanted",
"to",
"show",
"groups"
] | train | https://github.com/ploneintranet/ploneintranet.workspace/blob/a4fc7a5c61f9c6d4d4ad25478ff5250f342ffbba/src/ploneintranet/workspace/browser/views.py#L47-L56 | 0.007614 |
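The filter above is pure dict-and-string logic, so it can be exercised in isolation; the uid and group ids below are placeholders.
uid = 'WS-UID'
INTRANET_USERS_GROUP_ID = 'intranet_users'
entries = [{"id": "editors"}, {"id": "team_WS-UID"},
           {"id": "AuthenticatedUsers"}, {"id": "intranet_users"}]
keep = lambda x: not any((
    x["id"].endswith(uid),                 # workspace-local group
    x["id"] == "AuthenticatedUsers",       # global pseudo-group
    x["id"] == INTRANET_USERS_GROUP_ID,    # site-wide members group
))
print(list(filter(keep, entries)))  # [{'id': 'editors'}]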
fogleman/pg | pg/util.py | normal_from_points | def normal_from_points(a, b, c):
'''Computes a normal vector given three points.
'''
x1, y1, z1 = a
x2, y2, z2 = b
x3, y3, z3 = c
ab = (x2 - x1, y2 - y1, z2 - z1)
ac = (x3 - x1, y3 - y1, z3 - z1)
x, y, z = cross(ab, ac)
d = (x * x + y * y + z * z) ** 0.5
return (x / d, y / d, z / d) | python | def normal_from_points(a, b, c):
'''Computes a normal vector given three points.
'''
x1, y1, z1 = a
x2, y2, z2 = b
x3, y3, z3 = c
ab = (x2 - x1, y2 - y1, z2 - z1)
ac = (x3 - x1, y3 - y1, z3 - z1)
x, y, z = cross(ab, ac)
d = (x * x + y * y + z * z) ** 0.5
return (x / d, y / d, z / d) | [
"def",
"normal_from_points",
"(",
"a",
",",
"b",
",",
"c",
")",
":",
"x1",
",",
"y1",
",",
"z1",
"=",
"a",
"x2",
",",
"y2",
",",
"z2",
"=",
"b",
"x3",
",",
"y3",
",",
"z3",
"=",
"c",
"ab",
"=",
"(",
"x2",
"-",
"x1",
",",
"y2",
"-",
"y1",
",",
"z2",
"-",
"z1",
")",
"ac",
"=",
"(",
"x3",
"-",
"x1",
",",
"y3",
"-",
"y1",
",",
"z3",
"-",
"z1",
")",
"x",
",",
"y",
",",
"z",
"=",
"cross",
"(",
"ab",
",",
"ac",
")",
"d",
"=",
"(",
"x",
"*",
"x",
"+",
"y",
"*",
"y",
"+",
"z",
"*",
"z",
")",
"**",
"0.5",
"return",
"(",
"x",
"/",
"d",
",",
"y",
"/",
"d",
",",
"z",
"/",
"d",
")"
] | Computes a normal vector given three points. | [
"Computes",
"a",
"normal",
"vector",
"given",
"three",
"points",
"."
] | train | https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/util.py#L68-L78 | 0.003096 |
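A runnable check of the computation above with the `cross` dependency inlined (a standard 3-vector cross product, which is what the pg helper is assumed to compute):
def cross(u, v):
    return (u[1] * v[2] - u[2] * v[1],
            u[2] * v[0] - u[0] * v[2],
            u[0] * v[1] - u[1] * v[0])

def unit_normal(a, b, c):
    # Same steps as normal_from_points above.
    ab = tuple(b[i] - a[i] for i in range(3))
    ac = tuple(c[i] - a[i] for i in range(3))
    x, y, z = cross(ab, ac)
    d = (x * x + y * y + z * z) ** 0.5
    return (x / d, y / d, z / d)

# A triangle in the z=0 plane has unit normal +z.
print(unit_normal((0, 0, 0), (1, 0, 0), (0, 1, 0)))  # (0.0, 0.0, 1.0)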
tmux-python/tmuxp | tmuxp/config.py | import_tmuxinator | def import_tmuxinator(sconf):
"""Return tmuxp config from a `tmuxinator`_ yaml config.
.. _tmuxinator: https://github.com/aziz/tmuxinator
Parameters
----------
sconf : dict
python dict for session configuration.
Returns
-------
dict
"""
tmuxp_config = {}
if 'project_name' in sconf:
tmuxp_config['session_name'] = sconf.pop('project_name')
elif 'name' in sconf:
tmuxp_config['session_name'] = sconf.pop('name')
else:
tmuxp_config['session_name'] = None
if 'project_root' in sconf:
tmuxp_config['start_directory'] = sconf.pop('project_root')
elif 'root' in sconf:
tmuxp_config['start_directory'] = sconf.pop('root')
if 'cli_args' in sconf:
tmuxp_config['config'] = sconf['cli_args']
if '-f' in tmuxp_config['config']:
tmuxp_config['config'] = tmuxp_config['config'].replace('-f', '').strip()
elif 'tmux_options' in sconf:
tmuxp_config['config'] = sconf['tmux_options']
if '-f' in tmuxp_config['config']:
tmuxp_config['config'] = tmuxp_config['config'].replace('-f', '').strip()
if 'socket_name' in sconf:
tmuxp_config['socket_name'] = sconf['socket_name']
tmuxp_config['windows'] = []
if 'tabs' in sconf:
sconf['windows'] = sconf.pop('tabs')
if 'pre' in sconf and 'pre_window' in sconf:
tmuxp_config['shell_command'] = sconf['pre']
if isinstance(sconf['pre'], string_types):
tmuxp_config['shell_command_before'] = [sconf['pre_window']]
else:
tmuxp_config['shell_command_before'] = sconf['pre_window']
elif 'pre' in sconf:
if isinstance(sconf['pre'], string_types):
tmuxp_config['shell_command_before'] = [sconf['pre']]
else:
tmuxp_config['shell_command_before'] = sconf['pre']
if 'rbenv' in sconf:
if 'shell_command_before' not in tmuxp_config:
tmuxp_config['shell_command_before'] = []
tmuxp_config['shell_command_before'].append('rbenv shell %s' % sconf['rbenv'])
for w in sconf['windows']:
for k, v in w.items():
windowdict = {'window_name': k}
if isinstance(v, string_types) or v is None:
windowdict['panes'] = [v]
tmuxp_config['windows'].append(windowdict)
continue
elif isinstance(v, list):
windowdict['panes'] = v
tmuxp_config['windows'].append(windowdict)
continue
if 'pre' in v:
windowdict['shell_command_before'] = v['pre']
if 'panes' in v:
windowdict['panes'] = v['panes']
if 'root' in v:
windowdict['start_directory'] = v['root']
if 'layout' in v:
windowdict['layout'] = v['layout']
tmuxp_config['windows'].append(windowdict)
return tmuxp_config | python | def import_tmuxinator(sconf):
"""Return tmuxp config from a `tmuxinator`_ yaml config.
.. _tmuxinator: https://github.com/aziz/tmuxinator
Parameters
----------
sconf : dict
python dict for session configuration.
Returns
-------
dict
"""
tmuxp_config = {}
if 'project_name' in sconf:
tmuxp_config['session_name'] = sconf.pop('project_name')
elif 'name' in sconf:
tmuxp_config['session_name'] = sconf.pop('name')
else:
tmuxp_config['session_name'] = None
if 'project_root' in sconf:
tmuxp_config['start_directory'] = sconf.pop('project_root')
elif 'root' in sconf:
tmuxp_config['start_directory'] = sconf.pop('root')
if 'cli_args' in sconf:
tmuxp_config['config'] = sconf['cli_args']
if '-f' in tmuxp_config['config']:
tmuxp_config['config'] = tmuxp_config['config'].replace('-f', '').strip()
elif 'tmux_options' in sconf:
tmuxp_config['config'] = sconf['tmux_options']
if '-f' in tmuxp_config['config']:
tmuxp_config['config'] = tmuxp_config['config'].replace('-f', '').strip()
if 'socket_name' in sconf:
tmuxp_config['socket_name'] = sconf['socket_name']
tmuxp_config['windows'] = []
if 'tabs' in sconf:
sconf['windows'] = sconf.pop('tabs')
if 'pre' in sconf and 'pre_window' in sconf:
tmuxp_config['shell_command'] = sconf['pre']
if isinstance(sconf['pre'], string_types):
tmuxp_config['shell_command_before'] = [sconf['pre_window']]
else:
tmuxp_config['shell_command_before'] = sconf['pre_window']
elif 'pre' in sconf:
if isinstance(sconf['pre'], string_types):
tmuxp_config['shell_command_before'] = [sconf['pre']]
else:
tmuxp_config['shell_command_before'] = sconf['pre']
if 'rbenv' in sconf:
if 'shell_command_before' not in tmuxp_config:
tmuxp_config['shell_command_before'] = []
tmuxp_config['shell_command_before'].append('rbenv shell %s' % sconf['rbenv'])
for w in sconf['windows']:
for k, v in w.items():
windowdict = {'window_name': k}
if isinstance(v, string_types) or v is None:
windowdict['panes'] = [v]
tmuxp_config['windows'].append(windowdict)
continue
elif isinstance(v, list):
windowdict['panes'] = v
tmuxp_config['windows'].append(windowdict)
continue
if 'pre' in v:
windowdict['shell_command_before'] = v['pre']
if 'panes' in v:
windowdict['panes'] = v['panes']
if 'root' in v:
windowdict['start_directory'] = v['root']
if 'layout' in v:
windowdict['layout'] = v['layout']
tmuxp_config['windows'].append(windowdict)
return tmuxp_config | [
"def",
"import_tmuxinator",
"(",
"sconf",
")",
":",
"tmuxp_config",
"=",
"{",
"}",
"if",
"'project_name'",
"in",
"sconf",
":",
"tmuxp_config",
"[",
"'session_name'",
"]",
"=",
"sconf",
".",
"pop",
"(",
"'project_name'",
")",
"elif",
"'name'",
"in",
"sconf",
":",
"tmuxp_config",
"[",
"'session_name'",
"]",
"=",
"sconf",
".",
"pop",
"(",
"'name'",
")",
"else",
":",
"tmuxp_config",
"[",
"'session_name'",
"]",
"=",
"None",
"if",
"'project_root'",
"in",
"sconf",
":",
"tmuxp_config",
"[",
"'start_directory'",
"]",
"=",
"sconf",
".",
"pop",
"(",
"'project_root'",
")",
"elif",
"'root'",
"in",
"sconf",
":",
"tmuxp_config",
"[",
"'start_directory'",
"]",
"=",
"sconf",
".",
"pop",
"(",
"'root'",
")",
"if",
"'cli_args'",
"in",
"sconf",
":",
"tmuxp_config",
"[",
"'config'",
"]",
"=",
"sconf",
"[",
"'cli_args'",
"]",
"if",
"'-f'",
"in",
"tmuxp_config",
"[",
"'config'",
"]",
":",
"tmuxp_config",
"[",
"'config'",
"]",
"=",
"tmuxp_config",
"[",
"'config'",
"]",
".",
"replace",
"(",
"'-f'",
",",
"''",
")",
".",
"strip",
"(",
")",
"elif",
"'tmux_options'",
"in",
"sconf",
":",
"tmuxp_config",
"[",
"'config'",
"]",
"=",
"sconf",
"[",
"'tmux_options'",
"]",
"if",
"'-f'",
"in",
"tmuxp_config",
"[",
"'config'",
"]",
":",
"tmuxp_config",
"[",
"'config'",
"]",
"=",
"tmuxp_config",
"[",
"'config'",
"]",
".",
"replace",
"(",
"'-f'",
",",
"''",
")",
".",
"strip",
"(",
")",
"if",
"'socket_name'",
"in",
"sconf",
":",
"tmuxp_config",
"[",
"'socket_name'",
"]",
"=",
"sconf",
"[",
"'socket_name'",
"]",
"tmuxp_config",
"[",
"'windows'",
"]",
"=",
"[",
"]",
"if",
"'tabs'",
"in",
"sconf",
":",
"sconf",
"[",
"'windows'",
"]",
"=",
"sconf",
".",
"pop",
"(",
"'tabs'",
")",
"if",
"'pre'",
"in",
"sconf",
"and",
"'pre_window'",
"in",
"sconf",
":",
"tmuxp_config",
"[",
"'shell_command'",
"]",
"=",
"sconf",
"[",
"'pre'",
"]",
"if",
"isinstance",
"(",
"sconf",
"[",
"'pre'",
"]",
",",
"string_types",
")",
":",
"tmuxp_config",
"[",
"'shell_command_before'",
"]",
"=",
"[",
"sconf",
"[",
"'pre_window'",
"]",
"]",
"else",
":",
"tmuxp_config",
"[",
"'shell_command_before'",
"]",
"=",
"sconf",
"[",
"'pre_window'",
"]",
"elif",
"'pre'",
"in",
"sconf",
":",
"if",
"isinstance",
"(",
"sconf",
"[",
"'pre'",
"]",
",",
"string_types",
")",
":",
"tmuxp_config",
"[",
"'shell_command_before'",
"]",
"=",
"[",
"sconf",
"[",
"'pre'",
"]",
"]",
"else",
":",
"tmuxp_config",
"[",
"'shell_command_before'",
"]",
"=",
"sconf",
"[",
"'pre'",
"]",
"if",
"'rbenv'",
"in",
"sconf",
":",
"if",
"'shell_command_before'",
"not",
"in",
"tmuxp_config",
":",
"tmuxp_config",
"[",
"'shell_command_before'",
"]",
"=",
"[",
"]",
"tmuxp_config",
"[",
"'shell_command_before'",
"]",
".",
"append",
"(",
"'rbenv shell %s'",
"%",
"sconf",
"[",
"'rbenv'",
"]",
")",
"for",
"w",
"in",
"sconf",
"[",
"'windows'",
"]",
":",
"for",
"k",
",",
"v",
"in",
"w",
".",
"items",
"(",
")",
":",
"windowdict",
"=",
"{",
"'window_name'",
":",
"k",
"}",
"if",
"isinstance",
"(",
"v",
",",
"string_types",
")",
"or",
"v",
"is",
"None",
":",
"windowdict",
"[",
"'panes'",
"]",
"=",
"[",
"v",
"]",
"tmuxp_config",
"[",
"'windows'",
"]",
".",
"append",
"(",
"windowdict",
")",
"continue",
"elif",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"windowdict",
"[",
"'panes'",
"]",
"=",
"v",
"tmuxp_config",
"[",
"'windows'",
"]",
".",
"append",
"(",
"windowdict",
")",
"continue",
"if",
"'pre'",
"in",
"v",
":",
"windowdict",
"[",
"'shell_command_before'",
"]",
"=",
"v",
"[",
"'pre'",
"]",
"if",
"'panes'",
"in",
"v",
":",
"windowdict",
"[",
"'panes'",
"]",
"=",
"v",
"[",
"'panes'",
"]",
"if",
"'root'",
"in",
"v",
":",
"windowdict",
"[",
"'start_directory'",
"]",
"=",
"v",
"[",
"'root'",
"]",
"if",
"'layout'",
"in",
"v",
":",
"windowdict",
"[",
"'layout'",
"]",
"=",
"v",
"[",
"'layout'",
"]",
"tmuxp_config",
"[",
"'windows'",
"]",
".",
"append",
"(",
"windowdict",
")",
"return",
"tmuxp_config"
] | Return tmuxp config from a `tmuxinator`_ yaml config.
.. _tmuxinator: https://github.com/aziz/tmuxinator
Parameters
----------
sconf : dict
python dict for session configuration.
Returns
-------
dict | [
"Return",
"tmuxp",
"config",
"from",
"a",
"tmuxinator",
"_",
"yaml",
"config",
"."
] | train | https://github.com/tmux-python/tmuxp/blob/f4aa2e26589a4311131898d2e4a85cb1876b5c9b/tmuxp/config.py#L394-L484 | 0.001344 |
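A round-trip sketch for the converter above; it assumes `import_tmuxinator` and `string_types` (e.g. `string_types = (str,)`) are in scope, and the tmuxinator dict is a made-up minimal project.
sconf = {
    'name': 'demo',
    'root': '~/code/demo',
    'pre': 'echo hi',                       # becomes shell_command_before
    'windows': [
        {'editor': {'layout': 'main-vertical', 'panes': ['vim', 'guard']}},
        {'server': 'bundle exec rails s'},  # bare string -> single pane
    ],
}
out = import_tmuxinator(sconf)
print(out['session_name'], out['start_directory'])  # demo ~/code/demo
print(out['windows'][1])  # {'window_name': 'server', 'panes': ['bundle exec rails s']}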
hfaran/Tornado-JSON | tornado_json/api_doc_gen.py | _add_indent | def _add_indent(string, indent):
"""Add indent of ``indent`` spaces to ``string.split("\n")[1:]``
Useful for formatting strings into already-indented blocks
"""
lines = string.split("\n")
first, lines = lines[0], lines[1:]
lines = ["{indent}{s}".format(indent=" " * indent, s=s)
for s in lines]
lines = [first] + lines
return "\n".join(lines) | python | def _add_indent(string, indent):
"""Add indent of ``indent`` spaces to ``string.split("\n")[1:]``
Useful for formatting strings into already-indented blocks
"""
lines = string.split("\n")
first, lines = lines[0], lines[1:]
lines = ["{indent}{s}".format(indent=" " * indent, s=s)
for s in lines]
lines = [first] + lines
return "\n".join(lines) | [
"def",
"_add_indent",
"(",
"string",
",",
"indent",
")",
":",
"lines",
"=",
"string",
".",
"split",
"(",
"\"\\n\"",
")",
"first",
",",
"lines",
"=",
"lines",
"[",
"0",
"]",
",",
"lines",
"[",
"1",
":",
"]",
"lines",
"=",
"[",
"\"{indent}{s}\"",
".",
"format",
"(",
"indent",
"=",
"\" \"",
"*",
"indent",
",",
"s",
"=",
"s",
")",
"for",
"s",
"in",
"lines",
"]",
"lines",
"=",
"[",
"first",
"]",
"+",
"lines",
"return",
"\"\\n\"",
".",
"join",
"(",
"lines",
")"
] | Add indent of ``indent`` spaces to ``string.split("\n")[1:]``
Useful for formatting strings into already-indented blocks | [
"Add",
"indent",
"of",
"indent",
"spaces",
"to",
"string",
".",
"split",
"(",
"\\",
"n",
")",
"[",
"1",
":",
"]"
] | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/tornado_json/api_doc_gen.py#L111-L121 | 0.002571 |
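Behavior of `_add_indent` above on a three-line string, assuming the function is in scope: the first line is left alone and each later line gains `indent` spaces (here 4).
print(_add_indent("first\nsecond\nthird", 4))
# first
#     second
#     third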
glitchassassin/lackey | lackey/RegionMatching.py | Region.getBitmap | def getBitmap(self):
""" Captures screen area of this region, at least the part that is on the screen
Returns image as numpy array
"""
return PlatformManager.getBitmapFromRect(self.x, self.y, self.w, self.h) | python | def getBitmap(self):
""" Captures screen area of this region, at least the part that is on the screen
Returns image as numpy array
"""
return PlatformManager.getBitmapFromRect(self.x, self.y, self.w, self.h) | [
"def",
"getBitmap",
"(",
"self",
")",
":",
"return",
"PlatformManager",
".",
"getBitmapFromRect",
"(",
"self",
".",
"x",
",",
"self",
".",
"y",
",",
"self",
".",
"w",
",",
"self",
".",
"h",
")"
] | Captures screen area of this region, at least the part that is on the screen
Returns image as numpy array | [
"Captures",
"screen",
"area",
"of",
"this",
"region",
"at",
"least",
"the",
"part",
"that",
"is",
"on",
"the",
"screen"
] | train | https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L449-L454 | 0.016667 |
terrycain/aioboto3 | aioboto3/s3/inject.py | download_fileobj | async def download_fileobj(self, Bucket, Key, Fileobj, ExtraArgs=None, Callback=None, Config=None):
"""Download an object from S3 to a file-like object.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart download in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.client('s3')
with open('filename', 'wb') as data:
s3.download_fileobj('mybucket', 'mykey', data)
:type Fileobj: a file-like object
:param Fileobj: A file-like object to download into. At a minimum, it must
implement the `write` method and must accept bytes.
:type Bucket: str
:param Bucket: The name of the bucket to download from.
:type Key: str
:param Key: The name of the key to download from.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation.
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the download.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
download.
"""
try:
resp = await self.get_object(Bucket=Bucket, Key=Key)
except ClientError as err:
if err.response['Error']['Code'] == 'NoSuchKey':
# Convert to 404 so it looks the same when boto3.download_file fails
raise ClientError({'Error': {'Code': '404', 'Message': 'Not Found'}}, 'HeadObject')
raise
body = resp['Body']
while True:
data = await body.read(4096)
if data == b'':
break
if Callback:
try:
Callback(len(data))
except: # noqa: E722
pass
Fileobj.write(data)
await asyncio.sleep(0.0) | python | async def download_fileobj(self, Bucket, Key, Fileobj, ExtraArgs=None, Callback=None, Config=None):
"""Download an object from S3 to a file-like object.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart download in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.client('s3')
with open('filename', 'wb') as data:
s3.download_fileobj('mybucket', 'mykey', data)
:type Fileobj: a file-like object
:param Fileobj: A file-like object to download into. At a minimum, it must
implement the `write` method and must accept bytes.
:type Bucket: str
:param Bucket: The name of the bucket to download from.
:type Key: str
:param Key: The name of the key to download from.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation.
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the download.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
download.
"""
try:
resp = await self.get_object(Bucket=Bucket, Key=Key)
except ClientError as err:
if err.response['Error']['Code'] == 'NoSuchKey':
# Convert to 404 so it looks the same when boto3.download_file fails
raise ClientError({'Error': {'Code': '404', 'Message': 'Not Found'}}, 'HeadObject')
raise
body = resp['Body']
while True:
data = await body.read(4096)
if data == b'':
break
if Callback:
try:
Callback(len(data))
except: # noqa: E722
pass
Fileobj.write(data)
await asyncio.sleep(0.0) | [
"async",
"def",
"download_fileobj",
"(",
"self",
",",
"Bucket",
",",
"Key",
",",
"Fileobj",
",",
"ExtraArgs",
"=",
"None",
",",
"Callback",
"=",
"None",
",",
"Config",
"=",
"None",
")",
":",
"try",
":",
"resp",
"=",
"await",
"self",
".",
"get_object",
"(",
"Bucket",
"=",
"Bucket",
",",
"Key",
"=",
"Key",
")",
"except",
"ClientError",
"as",
"err",
":",
"if",
"err",
".",
"response",
"[",
"'Error'",
"]",
"[",
"'Code'",
"]",
"==",
"'NoSuchKey'",
":",
"# Convert to 404 so it looks the same when boto3.download_file fails",
"raise",
"ClientError",
"(",
"{",
"'Error'",
":",
"{",
"'Code'",
":",
"'404'",
",",
"'Message'",
":",
"'Not Found'",
"}",
"}",
",",
"'HeadObject'",
")",
"raise",
"body",
"=",
"resp",
"[",
"'Body'",
"]",
"while",
"True",
":",
"data",
"=",
"await",
"body",
".",
"read",
"(",
"4096",
")",
"if",
"data",
"==",
"b''",
":",
"break",
"if",
"Callback",
":",
"try",
":",
"Callback",
"(",
"len",
"(",
"data",
")",
")",
"except",
":",
"# noqa: E722",
"pass",
"Fileobj",
".",
"write",
"(",
"data",
")",
"await",
"asyncio",
".",
"sleep",
"(",
"0.0",
")"
] | Download an object from S3 to a file-like object.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart download in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.client('s3')
with open('filename', 'wb') as data:
s3.download_fileobj('mybucket', 'mykey', data)
:type Fileobj: a file-like object
:param Fileobj: A file-like object to download into. At a minimum, it must
implement the `write` method and must accept bytes.
:type Bucket: str
:param Bucket: The name of the bucket to download from.
:type Key: str
:param Key: The name of the key to download from.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation.
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the download.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
download. | [
"Download",
"an",
"object",
"from",
"S3",
"to",
"a",
"file",
"-",
"like",
"object",
"."
] | train | https://github.com/terrycain/aioboto3/blob/0fd192175461f7bb192f3ed9a872591caf8474ac/aioboto3/s3/inject.py#L33-L95 | 0.00209 |
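A hedged asyncio sketch of driving the coroutine above; the session-based client setup follows aioboto3's documented pattern (an assumption, since some releases expose module-level clients instead), and the bucket/key names are placeholders.
import asyncio
import aioboto3

async def fetch():
    session = aioboto3.Session()
    async with session.client('s3') as s3:
        with open('local-copy.bin', 'wb') as fileobj:
            # Bucket, Key, Fileobj map to the parameters documented above.
            await s3.download_fileobj('my-bucket', 'my-key', fileobj)

asyncio.run(fetch())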
alixnovosi/botskeleton | botskeleton/outputs/output_birdsite.py | BirdsiteSkeleton.send_with_media | def send_with_media(
self,
*,
text: str,
files: List[str],
captions: List[str]=[]
) -> List[OutputRecord]:
"""
Upload media to birdsite,
and send status and media,
and captions if present.
:param text: tweet text.
:param files: list of files to upload with post.
:param captions: list of captions to include as alt-text with files.
:returns: list of output records,
each corresponding to either a single post,
or an error.
"""
# upload media
media_ids = None
try:
self.ldebug(f"Uploading files {files}.")
media_ids = [self.api.media_upload(file).media_id_string for file in files]
except tweepy.TweepError as e:
return [self.handle_error(
message=f"Bot {self.bot_name} encountered an error when uploading {files}:\n{e}\n",
error=e)]
# apply captions, if present
self._handle_caption_upload(media_ids=media_ids, captions=captions)
# send status
try:
status = self.api.update_status(status=text, media_ids=media_ids)
self.ldebug(f"Status object from tweet: {status}.")
return [TweetRecord(record_data={
"tweet_id": status._json["id"],
"text": text,
"media_ids": media_ids,
"captions": captions,
"files": files
})]
except tweepy.TweepError as e:
return [self.handle_error(
message=(f"Bot {self.bot_name} encountered an error when "
f"sending post {text} with media ids {media_ids}:\n{e}\n"),
error=e)] | python | def send_with_media(
self,
*,
text: str,
files: List[str],
captions: List[str]=[]
) -> List[OutputRecord]:
"""
Upload media to birdsite,
and send status and media,
and captions if present.
:param text: tweet text.
:param files: list of files to upload with post.
:param captions: list of captions to include as alt-text with files.
:returns: list of output records,
each corresponding to either a single post,
or an error.
"""
# upload media
media_ids = None
try:
self.ldebug(f"Uploading files {files}.")
media_ids = [self.api.media_upload(file).media_id_string for file in files]
except tweepy.TweepError as e:
return [self.handle_error(
message=f"Bot {self.bot_name} encountered an error when uploading {files}:\n{e}\n",
error=e)]
# apply captions, if present
self._handle_caption_upload(media_ids=media_ids, captions=captions)
# send status
try:
status = self.api.update_status(status=text, media_ids=media_ids)
self.ldebug(f"Status object from tweet: {status}.")
return [TweetRecord(record_data={
"tweet_id": status._json["id"],
"text": text,
"media_ids": media_ids,
"captions": captions,
"files": files
})]
except tweepy.TweepError as e:
return [self.handle_error(
message=(f"Bot {self.bot_name} encountered an error when "
f"sending post {text} with media ids {media_ids}:\n{e}\n"),
error=e)] | [
"def",
"send_with_media",
"(",
"self",
",",
"*",
",",
"text",
":",
"str",
",",
"files",
":",
"List",
"[",
"str",
"]",
",",
"captions",
":",
"List",
"[",
"str",
"]",
"=",
"[",
"]",
")",
"->",
"List",
"[",
"OutputRecord",
"]",
":",
"# upload media",
"media_ids",
"=",
"None",
"try",
":",
"self",
".",
"ldebug",
"(",
"f\"Uploading files {files}.\"",
")",
"media_ids",
"=",
"[",
"self",
".",
"api",
".",
"media_upload",
"(",
"file",
")",
".",
"media_id_string",
"for",
"file",
"in",
"files",
"]",
"except",
"tweepy",
".",
"TweepError",
"as",
"e",
":",
"return",
"[",
"self",
".",
"handle_error",
"(",
"message",
"=",
"f\"Bot {self.bot_name} encountered an error when uploading {files}:\\n{e}\\n\"",
",",
"error",
"=",
"e",
")",
"]",
"# apply captions, if present",
"self",
".",
"_handle_caption_upload",
"(",
"media_ids",
"=",
"media_ids",
",",
"captions",
"=",
"captions",
")",
"# send status",
"try",
":",
"status",
"=",
"self",
".",
"api",
".",
"update_status",
"(",
"status",
"=",
"text",
",",
"media_ids",
"=",
"media_ids",
")",
"self",
".",
"ldebug",
"(",
"f\"Status object from tweet: {status}.\"",
")",
"return",
"[",
"TweetRecord",
"(",
"record_data",
"=",
"{",
"\"tweet_id\"",
":",
"status",
".",
"_json",
"[",
"\"id\"",
"]",
",",
"\"text\"",
":",
"text",
",",
"\"media_ids\"",
":",
"media_ids",
",",
"\"captions\"",
":",
"captions",
",",
"\"files\"",
":",
"files",
"}",
")",
"]",
"except",
"tweepy",
".",
"TweepError",
"as",
"e",
":",
"return",
"[",
"self",
".",
"handle_error",
"(",
"message",
"=",
"(",
"f\"Bot {self.bot_name} encountered an error when \"",
"f\"sending post {text} with media ids {media_ids}:\\n{e}\\n\"",
")",
",",
"error",
"=",
"e",
")",
"]"
] | Upload media to birdsite,
and send status and media,
and captions if present.
:param text: tweet text.
:param files: list of files to upload with post.
:param captions: list of captions to include as alt-text with files.
:returns: list of output records,
each corresponding to either a single post,
or an error. | [
"Upload",
"media",
"to",
"birdsite",
"and",
"send",
"status",
"and",
"media",
"and",
"captions",
"if",
"present",
"."
] | train | https://github.com/alixnovosi/botskeleton/blob/55bfc1b8a3623c10437e4ab2cd0b0ec8d35907a9/botskeleton/outputs/output_birdsite.py#L95-L144 | 0.004464 |
StellarCN/py-stellar-base | stellar_base/stellarxdr/xdrgen.py | p_optional_value | def p_optional_value(t):
"""optional_value : value
| empty"""
# return value or None.
t[0] = t[1]
# Note this must be unsigned
value = t[0]
if value is None or value[0].isdigit():
return
msg = ''
if value[0] == '-':
msg = "Can't use negative index %s" % value
elif value not in name_dict:
msg = "Can't derefence index %s" % value
else:
data = name_dict[value]
if data.type != 'const':
msg = "Can't use non-constant %s %s as index" % (data.type, value)
elif not data.positive:
msg = "Can't use negative index %s" % value
if msg:
global error_occurred
error_occurred = True
print(u"ERROR - {0:s} near line {1:d}".format(msg, t.lineno(1))) | python | def p_optional_value(t):
"""optional_value : value
| empty"""
# return value or None.
t[0] = t[1]
# Note this must be unsigned
value = t[0]
if value is None or value[0].isdigit():
return
msg = ''
if value[0] == '-':
msg = "Can't use negative index %s" % value
elif value not in name_dict:
msg = "Can't derefence index %s" % value
else:
data = name_dict[value]
if data.type != 'const':
msg = "Can't use non-constant %s %s as index" % (data.type, value)
elif not data.positive:
msg = "Can't use negative index %s" % value
if msg:
global error_occurred
error_occurred = True
print(u"ERROR - {0:s} near line {1:d}".format(msg, t.lineno(1))) | [
"def",
"p_optional_value",
"(",
"t",
")",
":",
"# return value or None.",
"t",
"[",
"0",
"]",
"=",
"t",
"[",
"1",
"]",
"# Note this must be unsigned",
"value",
"=",
"t",
"[",
"0",
"]",
"if",
"value",
"is",
"None",
"or",
"value",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"return",
"msg",
"=",
"''",
"if",
"value",
"[",
"0",
"]",
"==",
"'-'",
":",
"msg",
"=",
"\"Can't use negative index %s\"",
"%",
"value",
"elif",
"value",
"not",
"in",
"name_dict",
":",
"msg",
"=",
"\"Can't derefence index %s\"",
"%",
"value",
"else",
":",
"data",
"=",
"name_dict",
"[",
"value",
"]",
"if",
"data",
".",
"type",
"!=",
"'const'",
":",
"msg",
"=",
"\"Can't use non-constant %s %s as index\"",
"%",
"(",
"data",
".",
"type",
",",
"value",
")",
"elif",
"not",
"data",
".",
"positive",
":",
"msg",
"=",
"\"Can't use negative index %s\"",
"%",
"value",
"if",
"msg",
":",
"global",
"error_occurred",
"error_occurred",
"=",
"True",
"print",
"(",
"u\"ERROR - {0:s} near line {1:d}\"",
".",
"format",
"(",
"msg",
",",
"t",
".",
"lineno",
"(",
"1",
")",
")",
")"
] | optional_value : value
| empty | [
"optional_value",
":",
"value",
"|",
"empty"
] | train | https://github.com/StellarCN/py-stellar-base/blob/cce2e782064fb3955c85e1696e630d67b1010848/stellar_base/stellarxdr/xdrgen.py#L414-L437 | 0.001253 |
mojaie/chorus | chorus/util/debug.py | mute | def mute(func):
""" Decorator
Make stdout silent
"""
def _f(*args, **kwargs):
sys.stdout = open(os.devnull, 'w')
res = func(*args, **kwargs)
sys.stdout.close()
sys.stdout = sys.__stdout__
return res
return _f | python | def mute(func):
""" Decorator
Make stdout silent
"""
def _f(*args, **kwargs):
sys.stdout = open(os.devnull, 'w')
res = func(*args, **kwargs)
sys.stdout.close()
sys.stdout = sys.__stdout__
return res
return _f | [
"def",
"mute",
"(",
"func",
")",
":",
"def",
"_f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"sys",
".",
"stdout",
"=",
"open",
"(",
"os",
".",
"devnull",
",",
"'w'",
")",
"res",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"sys",
".",
"stdout",
".",
"close",
"(",
")",
"sys",
".",
"stdout",
"=",
"sys",
".",
"__stdout__",
"return",
"res",
"return",
"_f"
] | Decorator
Make stdout silent | [
"Decorator",
"Make",
"stdout",
"silent"
] | train | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/util/debug.py#L97-L107 | 0.003731 |
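A quick demonstration, assuming `mute` from above is importable: the wrapped function's prints are swallowed, but its return value comes back normally.
@mute
def noisy():
    print("this goes to /dev/null")
    return 42

print(noisy())  # prints only: 42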
PyCQA/pydocstyle | src/pydocstyle/config.py | ConfigurationParser._get_checked_errors | def _get_checked_errors(cls, options):
"""Extract the codes needed to be checked from `options`."""
checked_codes = cls._get_exclusive_error_codes(options)
if checked_codes is None:
checked_codes = cls.DEFAULT_CONVENTION
cls._set_add_options(checked_codes, options)
return checked_codes | python | def _get_checked_errors(cls, options):
"""Extract the codes needed to be checked from `options`."""
checked_codes = cls._get_exclusive_error_codes(options)
if checked_codes is None:
checked_codes = cls.DEFAULT_CONVENTION
cls._set_add_options(checked_codes, options)
return checked_codes | [
"def",
"_get_checked_errors",
"(",
"cls",
",",
"options",
")",
":",
"checked_codes",
"=",
"cls",
".",
"_get_exclusive_error_codes",
"(",
"options",
")",
"if",
"checked_codes",
"is",
"None",
":",
"checked_codes",
"=",
"cls",
".",
"DEFAULT_CONVENTION",
"cls",
".",
"_set_add_options",
"(",
"checked_codes",
",",
"options",
")",
"return",
"checked_codes"
] | Extract the codes needed to be checked from `options`. | [
"Extract",
"the",
"codes",
"needed",
"to",
"be",
"checked",
"from",
"options",
"."
] | train | https://github.com/PyCQA/pydocstyle/blob/2549847f9efad225789f931e83dfe782418ca13e/src/pydocstyle/config.py#L476-L484 | 0.005882 |
marshallward/f90nml | f90nml/namelist.py | Namelist.column_width | def column_width(self, width):
"""Validate and set the column width."""
if isinstance(width, int):
if width >= 0:
self._column_width = width
else:
raise ValueError('Column width must be nonnegative.')
else:
raise TypeError('Column width must be a nonnegative integer.') | python | def column_width(self, width):
"""Validate and set the column width."""
if isinstance(width, int):
if width >= 0:
self._column_width = width
else:
raise ValueError('Column width must be nonnegative.')
else:
raise TypeError('Column width must be a nonnegative integer.') | [
"def",
"column_width",
"(",
"self",
",",
"width",
")",
":",
"if",
"isinstance",
"(",
"width",
",",
"int",
")",
":",
"if",
"width",
">=",
"0",
":",
"self",
".",
"_column_width",
"=",
"width",
"else",
":",
"raise",
"ValueError",
"(",
"'Column width must be nonnegative.'",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Column width must be a nonnegative integer.'",
")"
] | Validate and set the column width. | [
"Validate",
"and",
"set",
"the",
"column",
"width",
"."
] | train | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L158-L166 | 0.00554 |
serhatbolsu/robotframework-appiumlibrary | AppiumLibrary/keywords/_keyevent.py | _KeyeventKeywords.long_press_keycode | def long_press_keycode(self, keycode, metastate=None):
"""Sends a long press of keycode to the device.
Android only.
See `press keycode` for more details.
"""
driver = self._current_application()
driver.long_press_keycode(int(keycode), metastate) | python | def long_press_keycode(self, keycode, metastate=None):
"""Sends a long press of keycode to the device.
Android only.
See `press keycode` for more details.
"""
driver = self._current_application()
driver.long_press_keycode(int(keycode), metastate) | [
"def",
"long_press_keycode",
"(",
"self",
",",
"keycode",
",",
"metastate",
"=",
"None",
")",
":",
"driver",
"=",
"self",
".",
"_current_application",
"(",
")",
"driver",
".",
"long_press_keycode",
"(",
"int",
"(",
"keycode",
")",
",",
"metastate",
")"
] | Sends a long press of keycode to the device.
Android only.
See `press keycode` for more details. | [
"Sends",
"a",
"long",
"press",
"of",
"keycode",
"to",
"the",
"device",
"."
] | train | https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_keyevent.py#L35-L43 | 0.006757 |
jorahn/icy | icy/ext/xml2json.py | json2elem | def json2elem(json_data, factory=ET.Element):
"""Convert a JSON string into an Element.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter.
"""
return internal_to_elem(json.loads(json_data), factory) | python | def json2elem(json_data, factory=ET.Element):
"""Convert a JSON string into an Element.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter.
"""
return internal_to_elem(json.loads(json_data), factory) | [
"def",
"json2elem",
"(",
"json_data",
",",
"factory",
"=",
"ET",
".",
"Element",
")",
":",
"return",
"internal_to_elem",
"(",
"json",
".",
"loads",
"(",
"json_data",
")",
",",
"factory",
")"
] | Convert a JSON string into an Element.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter. | [
"Convert",
"a",
"JSON",
"string",
"into",
"an",
"Element",
"."
] | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/ext/xml2json.py#L161-L170 | 0.003021 |
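A sketch of the round trip through `json2elem` above, assuming the module's internal JSON shape maps a tag name to its content (inferred from the elem-to-internal direction, so treat the exact shape as an assumption):
import json
import xml.etree.ElementTree as ET

data = json.dumps({"root": {"child": "text"}})
elem = json2elem(data)    # uses ET.Element as the default factory
print(ET.tostring(elem))  # expected: b'<root><child>text</child></root>'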