**Dataset columns** (type and observed value/length range):

| Column | Type | Min | Max |
| --- | --- | --- | --- |
| complexity | int64 | 1 | 56 |
| n_identifiers | int64 | 1 | 114 |
| code | string (length) | 19 | 12.7k |
| path | string (length) | 8 | 134 |
| n_ast_nodes | int64 | 12 | 2.35k |
| ast_errors | string (length) | 0 | 4.01k |
| repo | string (length) | 3 | 28 |
| documentation | dict | — | — |
| n_words | int64 | 2 | 866 |
| language | string (1 class) | — | — |
| vocab_size | int64 | 2 | 323 |
| commit_id | string (length) | 40 | 40 |
| file_name | string (length) | 5 | 79 |
| id | int64 | 243 | 338k |
| nloc | int64 | 1 | 228 |
| token_counts | int64 | 5 | 1.4k |
| fun_name | string (length) | 1 | 77 |
| url | string (length) | 31 | 60 |
| commit_message | string (length) | 3 | 15.3k |
| n_whitespaces | int64 | 1 | 3.23k |
| n_ast_errors | int64 | 0 | 20 |
| d_id | int64 | 74 | 121k |
| ast_levels | int64 | 4 | 29 |

**Sample rows:**
---
**complexity:** 10 · **n_identifiers:** 21
**code:**

```python
def show(*names, **kwargs):
    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    refresh = kwargs.pop("refresh", False)
    filter_ = salt.utils.args.split_input(
        kwargs.pop("filter", []),
        lambda x: str(x) if not isinstance(x, str) else x.lower(),
    )
    if kwargs:
        salt.utils.args.invalid_kwargs(kwargs)

    if refresh:
        refresh_db()

    if not names:
        return {}

    result = _call_apt(["apt-cache", "show"] + list(names), scope=False)
```

**path:** `salt/modules/aptpkg.py` · **n_ast_nodes:** 200 · **repo:** salt
**documentation:**
{ "docstring": "\n .. versionadded:: 2019.2.0\n\n Runs an ``apt-cache show`` on the passed package names, and returns the\n results in a nested dictionary. The top level of the return data will be\n the package name, with each package name mapping to a dictionary of version\n numbers to any additional information returned by ``apt-cache show``.\n\n filter\n An optional comma-separated list (or quoted Python list) of\n case-insensitive keys on which to filter. This allows one to restrict\n the information returned for each package to a smaller selection of\n pertinent items.\n\n refresh : False\n If ``True``, the apt cache will be refreshed first. By default, no\n refresh is performed.\n\n CLI Examples:\n\n .. code-block:: bash\n\n salt myminion pkg.show gawk\n salt myminion pkg.show 'nginx-*'\n salt myminion pkg.show 'nginx-*' filter=description,provides\n ", "language": "en", "n_whitespaces": 215, "n_words": 121, "vocab_size": 88 }
**n_words:** 43 · **language:** Python · **vocab_size:** 36
**commit_id:** `f2a783643de61cac1ff3288b40241e5ce6e1ddc8` · **file_name:** `aptpkg.py` · **id:** 215,944
**nloc:** 34 · **token_counts:** 225 · **fun_name:** `show`
**url:** https://github.com/saltstack/salt.git
**commit_message:**
Update to latest ``pyupgrade`` hook. Stop skipping it on CI. Signed-off-by: Pedro Algarvio <[email protected]>
**n_whitespaces:** 105 · **n_ast_errors:** 0 · **d_id:** 54,267 · **ast_levels:** 13
---
**complexity:** 12 · **n_identifiers:** 34
**code:**

```python
def gaunt(l_1, l_2, l_3, m_1, m_2, m_3, prec=None):
    r
    l_1, l_2, l_3, m_1, m_2, m_3 = [
        as_int(i) for i in (l_1, l_2, l_3, m_1, m_2, m_3)]

    if l_1 + l_2 - l_3 < 0:
        return S.Zero
    if l_1 - l_2 + l_3 < 0:
        return S.Zero
    if -l_1 + l_2 + l_3 < 0:
        return S.Zero
    if (m_1 + m_2 + m_3) != 0:
        return S.Zero
    if (abs(m_1) > l_1) or (abs(m_2) > l_2) or (abs(m_3) > l_3):
        return S.Zero

    bigL, remL = divmod(l_1 + l_2 + l_3, 2)
    if remL % 2:
        return S.Zero

    imin = max(-l_3 + l_1 + m_2, -l_3 + l_2 - m_1, 0)
    imax = min(l_2 + m_2, l_1 - m_1, l_1 + l_2 - l_3)

    _calc_factlist(max(l_1 + l_2 + l_3 + 1, imax + 1))

    ressqrt = sqrt((2 * l_1 + 1) * (2 * l_2 + 1) * (2 * l_3 + 1) * \
        _Factlist[l_1 - m_1] * _Factlist[l_1 + m_1] * _Factlist[l_2 - m_2] * \
        _Factlist[l_2 + m_2] * _Factlist[l_3 - m_3] * _Factlist[l_3 + m_3] / \
        (4*pi))

    prefac = Integer(_Factlist[bigL] * _Factlist[l_2 - l_1 + l_3] *
                     _Factlist[l_1 - l_2 + l_3] * _Factlist[l_1 + l_2 - l_3])/ \
        _Factlist[2 * bigL + 1]/ \
        (_Factlist[bigL - l_1] * _Factlist[bigL - l_2] * _Factlist[bigL - l_3])

    sumres = 0
    for ii in range(int(imin), int(imax) + 1):
        den = _Factlist[ii] * _Factlist[ii + l_3 - l_1 - m_2] * \
            _Factlist[l_2 + m_2 - ii] * _Factlist[l_1 - ii - m_1] * \
            _Factlist[ii + l_3 - l_2 + m_1] * _Factlist[l_1 + l_2 - l_3 - ii]
        sumres = sumres + Integer((-1) ** ii) / den

    res = ressqrt * prefac * sumres * Integer((-1) ** (bigL + l_3 + m_1 - m_2))
    if prec is not None:
        res = res.n(prec)
    return res
```

**path:** `sympy/physics/wigner.py` · **n_ast_nodes:** 736 · **repo:** sympy
**documentation:**
{ "docstring": "\n Calculate the Gaunt coefficient.\n\n Explanation\n ===========\n\n The Gaunt coefficient is defined as the integral over three\n spherical harmonics:\n\n .. math::\n\n \\begin{aligned}\n \\operatorname{Gaunt}(l_1,l_2,l_3,m_1,m_2,m_3)\n &=\\int Y_{l_1,m_1}(\\Omega)\n Y_{l_2,m_2}(\\Omega) Y_{l_3,m_3}(\\Omega) \\,d\\Omega \\\\\n &=\\sqrt{\\frac{(2l_1+1)(2l_2+1)(2l_3+1)}{4\\pi}}\n \\operatorname{Wigner3j}(l_1,l_2,l_3,0,0,0)\n \\operatorname{Wigner3j}(l_1,l_2,l_3,m_1,m_2,m_3)\n \\end{aligned}\n\n Parameters\n ==========\n\n l_1, l_2, l_3, m_1, m_2, m_3 :\n Integer.\n prec - precision, default: ``None``.\n Providing a precision can\n drastically speed up the calculation.\n\n Returns\n =======\n\n Rational number times the square root of a rational number\n (if ``prec=None``), or real number if a precision is given.\n\n Examples\n ========\n\n >>> from sympy.physics.wigner import gaunt\n >>> gaunt(1,0,1,1,0,-1)\n -1/(2*sqrt(pi))\n >>> gaunt(1000,1000,1200,9,3,-12).n(64)\n 0.00689500421922113448...\n\n It is an error to use non-integer values for `l` and `m`::\n\n sage: gaunt(1.2,0,1.2,0,0,0)\n Traceback (most recent call last):\n ...\n ValueError: l values must be integer\n sage: gaunt(1,0,1,1.1,0,-1.1)\n Traceback (most recent call last):\n ...\n ValueError: m values must be integer\n\n Notes\n =====\n\n The Gaunt coefficient obeys the following symmetry rules:\n\n - invariant under any permutation of the columns\n\n .. math::\n \\begin{aligned}\n Y(l_1,l_2,l_3,m_1,m_2,m_3)\n &=Y(l_3,l_1,l_2,m_3,m_1,m_2) \\\\\n &=Y(l_2,l_3,l_1,m_2,m_3,m_1) \\\\\n &=Y(l_3,l_2,l_1,m_3,m_2,m_1) \\\\\n &=Y(l_1,l_3,l_2,m_1,m_3,m_2) \\\\\n &=Y(l_2,l_1,l_3,m_2,m_1,m_3)\n \\end{aligned}\n\n - invariant under space inflection, i.e.\n\n .. math::\n Y(l_1,l_2,l_3,m_1,m_2,m_3)\n =Y(l_1,l_2,l_3,-m_1,-m_2,-m_3)\n\n - symmetric with respect to the 72 Regge symmetries as inherited\n for the `3j` symbols [Regge58]_\n\n - zero for `l_1`, `l_2`, `l_3` not fulfilling triangle relation\n\n - zero for violating any one of the conditions: `l_1 \\ge |m_1|`,\n `l_2 \\ge |m_2|`, `l_3 \\ge |m_3|`\n\n - non-zero only for an even sum of the `l_i`, i.e.\n `L = l_1 + l_2 + l_3 = 2n` for `n` in `\\mathbb{N}`\n\n Algorithms\n ==========\n\n This function uses the algorithm of [Liberatodebrito82]_ to\n calculate the value of the Gaunt coefficient exactly. Note that\n the formula contains alternating sums over large factorials and is\n therefore unsuitable for finite precision arithmetic and only\n useful for a computer algebra system [Rasch03]_.\n\n Authors\n =======\n\n Jens Rasch (2009-03-24): initial version for Sage.\n ", "language": "en", "n_whitespaces": 669, "n_words": 295, "vocab_size": 202 }
**n_words:** 306 · **language:** Python · **vocab_size:** 106
**commit_id:** `bcb817024d689b65db350a5a565c08f367b899ee` · **file_name:** `wigner.py` · **id:** 200,006
**nloc:** 141 · **token_counts:** 505 · **fun_name:** `gaunt`
**url:** https://github.com/sympy/sympy.git
**commit_message:**
Update wigner.py
**n_whitespaces:** 517 · **n_ast_errors:** 0 · **d_id:** 49,490 · **ast_levels:** 20
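The early `return S.Zero` branches implement the selection rules spelled out in the docstring; a quick check, assuming `sympy` is installed (the first value follows the docstring's own example):

```python
# Selection rules of the Gaunt coefficient, checked against sympy.
from sympy.physics.wigner import gaunt

print(gaunt(1, 0, 1, 1, 0, -1))  # -1/(2*sqrt(pi)), as in the docstring
print(gaunt(1, 1, 1, 0, 0, 0))   # 0 -- l_1 + l_2 + l_3 is odd
print(gaunt(1, 0, 1, 1, 1, -1))  # 0 -- m_1 + m_2 + m_3 != 0
```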
---
**complexity:** 1 · **n_identifiers:** 8
**code:**

```python
def test_profile_typesafety():
    with pytest.raises(TypeError, match="Invalid type for Profile"):
        ImageCms.ImageCmsProfile(0).tobytes()
    with pytest.raises(TypeError, match="Invalid type for Profile"):
        ImageCms.ImageCmsProfile(1).tobytes()
```

**path:** `Tests/test_imagecms.py` · **n_ast_nodes:** 90 · **repo:** Pillow
**documentation:**
{ "docstring": "Profile init type safety\n\n prepatch, these would segfault, postpatch they should emit a typeerror\n ", "language": "en", "n_whitespaces": 20, "n_words": 14, "vocab_size": 14 }
**n_words:** 16 · **language:** Python · **vocab_size:** 10
**commit_id:** `983a6139d57b37a883344972c6b1de50bb757de0` · **file_name:** `test_imagecms.py` · **id:** 243,028
**nloc:** 5 · **token_counts:** 49 · **fun_name:** `test_profile_typesafety`
**url:** https://github.com/python-pillow/Pillow.git
**commit_message:**
Check other exception messages
**n_whitespaces:** 39 · **n_ast_errors:** 0 · **d_id:** 69,956 · **ast_levels:** 11
---
**complexity:** 2 · **n_identifiers:** 24
**code:**

```python
def _crop_source_faces(self):
    logger.debug("Updating source faces")
    self._faces = {}
    for image in self.source:
        detected_face = image["detected_faces"][0]
        src_img = image["image"]
        detected_face.load_aligned(src_img, size=self._size, centering=self._centering)
        matrix = detected_face.aligned.matrix
        self._faces.setdefault("filenames", []).append(os.path.splitext(image["filename"])[0])
        self._faces.setdefault("matrix", []).append(matrix)
        self._faces.setdefault("src", []).append(transform_image(src_img, matrix, self._size, self._padding))
    self.update_source = False
    logger.debug("Updated source faces")
```

**path:** `tools/preview/preview.py` · **n_ast_nodes:** 247 · **repo:** faceswap
**documentation:**
{ "docstring": " Extract the source faces from the source frames, along with their filenames and the\n transformation matrix used to extract the faces. ", "language": "en", "n_whitespaces": 29, "n_words": 21, "vocab_size": 17 }
**n_words:** 39 · **language:** Python · **vocab_size:** 33
**commit_id:** `71c20252c2e747f692289cdefe80ad0d5a456ea6` · **file_name:** `preview.py` · **id:** 100,516
**nloc:** 17 · **token_counts:** 150 · **fun_name:** `_crop_source_faces`
**url:** https://github.com/deepfakes/faceswap.git
**commit_message:**
bugfix: Preview Tool, ensure all config items are written
**n_whitespaces:** 396 · **n_ast_errors:** 0 · **d_id:** 19,982 · **ast_levels:** 14
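The repeated `setdefault(...).append(...)` calls above are the standard group-into-lists idiom; a minimal standalone illustration:

```python
# setdefault-append: accumulate values into lists keyed by name
# without pre-initializing the dict.
faces = {}
for key, value in [("matrix", "m1"), ("matrix", "m2"), ("src", "s1")]:
    faces.setdefault(key, []).append(value)

assert faces == {"matrix": ["m1", "m2"], "src": ["s1"]}
```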
---
**complexity:** 8 · **n_identifiers:** 11
**code:**

```python
def partition_spans_by_kind(traces):
    server_spans = []
    client_spans = []
    internal_spans = []
    for trace in traces:
        for span in trace['spans']:
            for tag in span['tags']:
                if 'span.kind' == tag.get('key', ''):
                    span_kind = tag.get('value', '')
                    if 'server' == span_kind:
                        server_spans.append(span)
                    elif 'client' == span_kind:
                        client_spans.append(span)
                    elif 'internal' == span_kind:
                        internal_spans.append(span)
    return (server_spans, client_spans, internal_spans)
```

**path:** `tests/integration/instrumentation/__init__.py` · **n_ast_nodes:** 180 · **repo:** jina
**documentation:**
{ "docstring": "Returns three lists each containing spans of kind SpanKind.SERVER, SpanKind.CLIENT and SpandKind.INTERNAL", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
**n_words:** 51 · **language:** Python · **vocab_size:** 35
**commit_id:** `107631e955b21db8a4ddb3bee02130de3650d032` · **file_name:** `__init__.py` · **id:** 13,265
**nloc:** 16 · **token_counts:** 102 · **fun_name:** `partition_spans_by_kind`
**url:** https://github.com/jina-ai/jina.git
**commit_message:**
feat(instrumentation): add OpenTelemetry tracing and metrics with basic configurations (#5175)
**n_whitespaces:** 247 · **n_ast_errors:** 0 · **d_id:** 2,592 · **ast_levels:** 18
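A minimal sketch exercising the sampled function on a hand-built trace; the trace layout here (dicts with `spans` and `tags` lists) is inferred from the function body, not from Jina's actual trace export format:

```python
# Hypothetical toy input; only the keys the function reads are filled in.
traces = [{
    "spans": [
        {"name": "handle", "tags": [{"key": "span.kind", "value": "server"}]},
        {"name": "call",   "tags": [{"key": "span.kind", "value": "client"}]},
        {"name": "other",  "tags": [{"key": "component", "value": "x"}]},
    ],
}]

server, client, internal = partition_spans_by_kind(traces)
assert [s["name"] for s in server] == ["handle"]
assert [s["name"] for s in client] == ["call"]
assert internal == []
```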
---
**complexity:** 2 · **n_identifiers:** 5
**code:**

```python
def PreStem(name=None):
    if name is None:
        name = "prestem" + str(backend.get_uid("prestem"))
```

**path:** `keras/applications/convnext.py` · **n_ast_nodes:** 49 · **repo:** keras
**documentation:**
{ "docstring": "Normalizes inputs with ImageNet-1k mean and std.\n\n Args:\n name (str): Name prefix.\n\n Returns:\n A presemt function.\n ", "language": "en", "n_whitespaces": 25, "n_words": 16, "vocab_size": 16 }
**n_words:** 11 · **language:** Python · **vocab_size:** 10
**commit_id:** `2d1086447a25d281f9428832d046c473d80ad761` · **file_name:** `convnext.py` · **id:** 269,281
**nloc:** 5 · **token_counts:** 30 · **fun_name:** `PreStem`
**url:** https://github.com/keras-team/keras.git
**commit_message:**
Corrected preprocess_input docstring in regnet.py and convnext.py
**n_whitespaces:** 24 · **n_ast_errors:** 0 · **d_id:** 80,000 · **ast_levels:** 14
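The default-name logic above leans on Keras' per-prefix UID counter; a rough stand-in for `backend.get_uid`, assuming it keeps one incrementing counter per prefix:

```python
# Stand-in for keras.backend.get_uid: one counter per prefix (assumption).
from collections import defaultdict

_uids = defaultdict(int)

def get_uid(prefix):
    _uids[prefix] += 1
    return _uids[prefix]

name = None
if name is None:
    name = "prestem" + str(get_uid("prestem"))

print(name)  # prestem1
```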
---
**complexity:** 1 · **n_identifiers:** 6
**code:**

```python
async def get_users_expiring_soon(self) -> List[Tuple[str, int]]:
```

**path:** `synapse/storage/databases/main/registration.py` · **n_ast_nodes:** 27 · **repo:** synapse
**documentation:**
{ "docstring": "Selects users whose account will expire in the [now, now + renew_at] time\n window (see configuration for account_validity for information on what renew_at\n refers to).\n\n Returns:\n A list of tuples, each with a user ID and expiration time (in milliseconds).\n ", "language": "en", "n_whitespaces": 79, "n_words": 40, "vocab_size": 38 }
**n_words:** 6 · **language:** Python · **vocab_size:** 6
**commit_id:** `1783156dbcf4164692e66275d1c29857c434995b` · **file_name:** `registration.py` · **id:** 248,020
**nloc:** 15 · **token_counts:** 47 · **fun_name:** `get_users_expiring_soon`
**url:** https://github.com/matrix-org/synapse.git
**commit_message:**
Add some type hints to datastore (#12423) * Add some type hints to datastore * newsfile * change `Collection` to `List` * refactor return type of `select_users_txn` * correct type hint in `stream.py` * Remove `Optional` in `select_users_txn` * remove not needed return type in `__init__` * Revert change in `get_stream_id_for_event_txn` * Remove import from `Literal`
**n_whitespaces:** 13 · **n_ast_errors:** 0 · **d_id:** 72,051 · **ast_levels:** 6
---
**complexity:** 7 · **n_identifiers:** 14
**code:**

```python
def get_topic_progress(topic, course_name, program):
    student = get_current_student()
    if not student:
        return None
    course_enrollment = get_or_create_course_enrollment(course_name, program)
    progress = student.get_topic_progress(course_enrollment.name, topic)
    if not progress:
        return None
    count = sum([activity["is_complete"] for activity in progress])
    if count == 0:
        return {"completed": False, "started": False}
    elif count == len(progress):
        return {"completed": True, "started": True}
    elif count < len(progress):
        return {"completed": False, "started": True}
```

**path:** `erpnext/education/utils.py` · **n_ast_nodes:** 188 · **repo:** erpnext
**documentation:**
{ "docstring": "\n\tReturn the porgress of a course in a program as well as the content to continue from.\n\t :param topic_name:\n\t :param course_name:\n\t", "language": "en", "n_whitespaces": 34, "n_words": 21, "vocab_size": 17 }
**n_words:** 59 · **language:** Python · **vocab_size:** 36
**commit_id:** `494bd9ef78313436f0424b918f200dab8fc7c20b` · **file_name:** `utils.py` · **id:** 65,962
**nloc:** 15 · **token_counts:** 113 · **fun_name:** `get_topic_progress`
**url:** https://github.com/frappe/erpnext.git
**commit_message:**
style: format code with black
**n_whitespaces:** 44 · **n_ast_errors:** 0 · **d_id:** 14,072 · **ast_levels:** 11
---
**complexity:** 1 · **n_identifiers:** 9
**code:**

```python
def subscription_app_deleted_webhook(subscription_webhook):
    return subscription_webhook(
        APP_DELETED_SUBSCRIPTION_QUERY, WebhookEventAsyncType.APP_DELETED
    )


APP_STATUS_CHANGED_SUBSCRIPTION_QUERY = (
    APP_DETAILS_FRAGMENT
    +
)


@pytest.fixture
```

**path:** `saleor/plugins/webhook/tests/subscription_webhooks/fixtures.py` · **n_ast_nodes:** 42 · **ast_errors:** `@pytest.fixture` · **repo:** saleor
**documentation:**
{ "docstring": "\n subscription{\n event{\n ...on AppStatusChanged{\n app{\n ...AppDetails\n }\n }\n }\n }\n", "language": "en", "n_whitespaces": 69, "n_words": 10, "vocab_size": 7 }
**n_words:** 14 · **language:** Python · **vocab_size:** 13
**commit_id:** `b5e414c98a1535d287721c859994424cf0eea081` · **file_name:** `fixtures.py` · **id:** 27,099
**nloc:** 4 · **token_counts:** 14 · **fun_name:** `subscription_app_deleted_webhook`
**url:** https://github.com/saleor/saleor.git
**commit_message:**
New events related to apps changes. (#9698) * New events related to apps changes. * Schema update after rebase * CHANGELOG.md update * New events description fix * Missing app event added to CHANGELOG.md
**n_whitespaces:** 30 · **n_ast_errors:** 1 · **d_id:** 5,092 · **ast_levels:** 8
---
**complexity:** 1 · **n_identifiers:** 4
**code:**

```python
def entity_id(self) -> str:  # type: ignore[override]
    return self._entity_id
```

**path:** `homeassistant/helpers/template.py` · **n_ast_nodes:** 23 · **repo:** core
**documentation:**
{ "docstring": "Wrap State.entity_id.\n\n Intentionally does not collect state\n ", "language": "en", "n_whitespaces": 21, "n_words": 7, "vocab_size": 7 }
**n_words:** 9 · **language:** Python · **vocab_size:** 9
**commit_id:** `aa02a53ac667d08c66a536baf139993bcfe4d7d6` · **file_name:** `template.py` · **id:** 291,227
**nloc:** 6 · **token_counts:** 12 · **fun_name:** `entity_id`
**url:** https://github.com/home-assistant/core.git
**commit_message:**
Add type hints to template states (#82582) * Add type hints to template states * Undo rename * Remove invalid mypy issue link
**n_whitespaces:** 24 · **n_ast_errors:** 0 · **d_id:** 90,337 · **ast_levels:** 6
---
**complexity:** 2 · **n_identifiers:** 16
**code:**

```python
def compute_or_load(self, wav_file):
    pitch_file = self.create_pitch_file_path(wav_file, self.cache_path)
    if not os.path.exists(pitch_file):
        pitch = self._compute_and_save_pitch(self.ap, wav_file, pitch_file)
    else:
        pitch = np.load(pitch_file)
    return pitch.astype(np.float32)
```

**path:** `TTS/tts/datasets/dataset.py` · **n_ast_nodes:** 102 · **repo:** TTS
**documentation:**
{ "docstring": "\n compute pitch and return a numpy array of pitch values\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 9 }
**n_words:** 21 · **language:** Python · **vocab_size:** 18
**commit_id:** `176b712c1a40cf630da9a77f1826836723c40fde` · **file_name:** `dataset.py` · **id:** 262,049
**nloc:** 7 · **token_counts:** 64 · **fun_name:** `compute_or_load`
**url:** https://github.com/coqui-ai/TTS.git
**commit_message:**
Refactor TTSDataset ⚡️
**n_whitespaces:** 78 · **n_ast_errors:** 0 · **d_id:** 77,108 · **ast_levels:** 11
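The sample follows a compute-or-load caching pattern; a generic sketch with a hypothetical `compute` callable, assuming the cache path ends in `.npy` so that `np.save` and `np.load` agree on the filename:

```python
import os
import numpy as np

def compute_or_load(cache_file, compute):
    # cache_file should end in ".npy": np.save appends the suffix when
    # missing, and np.load would then not find the file.
    if not os.path.exists(cache_file):
        arr = np.asarray(compute())
        np.save(cache_file, arr)
    else:
        arr = np.load(cache_file)
    return arr.astype(np.float32)
```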
---
**complexity:** 1 · **n_identifiers:** 35
**code:**

```python
def test_enqueue_task_instances_sets_ti_state_to_None_if_dagrun_in_finish_state(self, state, dag_maker):
    dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
    task_id_1 = 'dummy'
    session = settings.Session()
    with dag_maker(dag_id=dag_id, start_date=DEFAULT_DATE, session=session):
        task1 = EmptyOperator(task_id=task_id_1)
    self.scheduler_job = SchedulerJob(subdir=os.devnull)
    dr1 = dag_maker.create_dagrun(state=state)
    ti = dr1.get_task_instance(task1.task_id, session)
    ti.state = State.SCHEDULED
    session.merge(ti)
    session.commit()
    with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
        self.scheduler_job._enqueue_task_instances_with_queued_state([ti])
    ti.refresh_from_db()
    assert ti.state == State.NONE
    mock_queue_command.assert_not_called()
```

**path:** `tests/jobs/test_scheduler_job.py` · **n_ast_nodes:** 233 · **repo:** airflow
**documentation:**
{ "docstring": "This tests that task instances whose dagrun is in finished state are not queued", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
**n_words:** 47 · **language:** Python · **vocab_size:** 38
**commit_id:** `49e336ae0302b386a2f47269a6d13988382d975f` · **file_name:** `test_scheduler_job.py` · **id:** 47,522
**nloc:** 17 · **token_counts:** 139 · **fun_name:** `test_enqueue_task_instances_sets_ti_state_to_None_if_dagrun_in_finish_state`
**url:** https://github.com/apache/airflow.git
**commit_message:**
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
**n_whitespaces:** 174 · **n_ast_errors:** 0 · **d_id:** 9,144 · **ast_levels:** 11
---
**complexity:** 3 · **n_identifiers:** 20
**code:**

```python
def _class_means(X, y):
    xp, is_array_api = get_namespace(X)
    classes, y = xp.unique_inverse(y)
    means = xp.zeros(shape=(classes.shape[0], X.shape[1]))
    if is_array_api:
        for i in range(classes.shape[0]):
            means[i, :] = xp.mean(X[y == i], axis=0)
    else:
        # TODO: Explore the choice of using bincount + add.at as it seems sub optimal
        # from a performance-wise
        cnt = np.bincount(y)
        np.add.at(means, y, X)
        means /= cnt[:, None]
    return means
```

**path:** `sklearn/discriminant_analysis.py` · **n_ast_nodes:** 186 · **repo:** scikit-learn
**documentation:**
{ "docstring": "Compute class means.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values.\n\n Returns\n -------\n means : array-like of shape (n_classes, n_features)\n Class means.\n ", "language": "en", "n_whitespaces": 81, "n_words": 36, "vocab_size": 25 }
**n_words:** 60 · **language:** Python · **vocab_size:** 53
**commit_id:** `2710a9e7eefd2088ce35fd2fb6651d5f97e5ef8b` · **file_name:** `discriminant_analysis.py` · **id:** 261,005
**nloc:** 12 · **token_counts:** 121 · **fun_name:** `_class_means`
**url:** https://github.com/scikit-learn/scikit-learn.git
**commit_message:**
ENH Adds Array API support to LinearDiscriminantAnalysis (#22554) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]>
**n_whitespaces:** 134 · **n_ast_errors:** 0 · **d_id:** 76,620 · **ast_levels:** 14
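The NumPy branch above computes per-class means with `np.add.at` plus `np.bincount`; a small self-check of that trick:

```python
import numpy as np

X = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
y = np.array([0, 0, 1, 1])

means = np.zeros((2, X.shape[1]))
np.add.at(means, y, X)             # scatter-add each row into its class slot
means /= np.bincount(y)[:, None]   # divide by per-class counts

expected = np.stack([X[y == c].mean(axis=0) for c in (0, 1)])
assert np.allclose(means, expected)  # [[2., 3.], [6., 7.]]
```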
---
**complexity:** 4 · **n_identifiers:** 9
**code:**

```python
def degree_centrality(G):
    if len(G) <= 1:
        return {n: 1 for n in G}

    s = 1.0 / (len(G) - 1.0)
    centrality = {n: d * s for n, d in G.degree()}
    return centrality


@not_implemented_for("undirected")
```

**path:** `networkx/algorithms/centrality/degree_alg.py` · **n_ast_nodes:** 102 · **ast_errors:** `@not_implemented_for("undirected")` · **repo:** networkx
**documentation:**
{ "docstring": "Compute the degree centrality for nodes.\n\n The degree centrality for a node v is the fraction of nodes it\n is connected to.\n\n Parameters\n ----------\n G : graph\n A networkx graph\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with degree centrality as the value.\n\n Examples\n --------\n >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])\n >>> nx.degree_centrality(G)\n {0: 1.0, 1: 1.0, 2: 0.6666666666666666, 3: 0.6666666666666666}\n\n See Also\n --------\n betweenness_centrality, load_centrality, eigenvector_centrality\n\n Notes\n -----\n The degree centrality values are normalized by dividing by the maximum\n possible degree in a simple graph n-1 where n is the number of nodes in G.\n\n For multigraphs or graphs with self loops the maximum degree might\n be higher than n-1 and values of degree centrality greater than 1\n are possible.\n ", "language": "en", "n_whitespaces": 212, "n_words": 129, "vocab_size": 85 }
**n_words:** 34 · **language:** Python · **vocab_size:** 26
**commit_id:** `b8d1438e4ea3d8190c650110b3b7d7c141224842` · **file_name:** `degree_alg.py` · **id:** 176,972
**nloc:** 6 · **token_counts:** 61 · **fun_name:** `degree_centrality`
**url:** https://github.com/networkx/networkx.git
**commit_message:**
added examples to degree_alg.py (#5644) * added example on degree centrality * added example on in degree centrality * added example on out degree centrality * added opening braces
**n_whitespaces:** 55 · **n_ast_errors:** 1 · **d_id:** 42,200 · **ast_levels:** 11
---
**complexity:** 2 · **n_identifiers:** 6
**code:**

```python
def get_registered_name(obj):
    if obj in _GLOBAL_CUSTOM_NAMES:
        return _GLOBAL_CUSTOM_NAMES[obj]
    else:
        return obj.__name__


@tf_contextlib.contextmanager
```

**path:** `keras/utils/generic_utils.py` · **n_ast_nodes:** 45 · **ast_errors:** `@tf_contextlib.contextmanager` · **repo:** keras
**documentation:**
{ "docstring": "Returns the name registered to an object within the Keras framework.\n\n This function is part of the Keras serialization and deserialization\n framework. It maps objects to the string names associated with those objects\n for serialization/deserialization.\n\n Args:\n obj: The object to look up.\n\n Returns:\n The name associated with the object, or the default Python name if the\n object is not registered.\n ", "language": "en", "n_whitespaces": 95, "n_words": 60, "vocab_size": 41 }
**n_words:** 12 · **language:** Python · **vocab_size:** 11
**commit_id:** `84afc5193d38057e2e2badf9c889ea87d80d8fbf` · **file_name:** `generic_utils.py` · **id:** 276,868
**nloc:** 5 · **token_counts:** 22 · **fun_name:** `get_registered_name`
**url:** https://github.com/keras-team/keras.git
**commit_message:**
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
**n_whitespaces:** 34 · **n_ast_errors:** 1 · **d_id:** 81,767 · **ast_levels:** 9
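A minimal sketch of the object-keyed registry with `__name__` fallback that the sample implements; the `register` decorator here is hypothetical, not Keras' actual registration API:

```python
_GLOBAL_CUSTOM_NAMES = {}

def register(name):
    # Hypothetical helper for the demo; Keras' real API differs.
    def deco(obj):
        _GLOBAL_CUSTOM_NAMES[obj] = name
        return obj
    return deco

def get_registered_name(obj):
    if obj in _GLOBAL_CUSTOM_NAMES:
        return _GLOBAL_CUSTOM_NAMES[obj]
    return obj.__name__

@register("Custom>my_fn")
def my_fn():
    pass

assert get_registered_name(my_fn) == "Custom>my_fn"
assert get_registered_name(print) == "print"  # fallback to __name__
```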
---
**complexity:** 3 · **n_identifiers:** 8
**code:**

```python
async def notify_clients(cls) -> None:
    while not cls.STOP:
        await asyncio.sleep(cls.UPDATE_INTERVALS)
        if cls.EVENT_QUEUE:
            await cls.broadcast_estimations()
```

**path:** `gradio/event_queue.py` · **n_ast_nodes:** 61 · **repo:** gradio
**documentation:**
{ "docstring": "\n Notify clients about events statuses in the queue periodically.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
**n_words:** 14 · **language:** Python · **vocab_size:** 13
**commit_id:** `b1dfc9a172440e9c9736566f326ba339ff559604` · **file_name:** `event_queue.py` · **id:** 180,689
**nloc:** 8 · **token_counts:** 34 · **fun_name:** `notify_clients`
**url:** https://github.com/gradio-app/gradio.git
**commit_message:**
Release new queue beta (#1969) * queue-refactor-backend (#1489) * queue-refactor-backend - create a template for the new design * queue-refactor-backend - clean after the old queue * queue-refactor-backend - add basic test to websocket endpoint * queue-refactor-backend - small fix * queue-refactor-backend - debugs&fixes&finalizations - test the flow with postman * queue-refactor-backend - tweaks on websocket closing * queue-refactor-backend - cleanup * queue-refactor-backend - cleanup & tweaks * queue-refactor-backend - cleanup & tweaks * queue-refactor-backend - cleanup & tweaks - correct the exception handling * queue-refactor-backend - add websockets dependency * queue-refactor-backend - reformat * queue-refactor-backend - add single event test * queue-refactor-backend - tweaks - remove outdated tests * queue-refactor-backend - reformat * queue-refactor-backend - reformat * queue-refactor-backend - reformat * queue-refactor-backend - add Queue configurations to Blocks.launch() - add live_queue_update to send estimations whenever a job gets fetched from the Queue * queue-refactor-backend - add Queue configurations to Blocks.launch() - add live_queue_update to send estimations whenever a job gets fetched from the Queue * queue-refactor-backend - tweaks * queue-refactor-backend - make SLEEP_WHEN_FREE shorter Co-authored-by: Ali Abid <[email protected]> * Add estimation parameters to queue (#1889) * - tweaks on Estimation * version * Revert "version" This reverts commit bd1f4d7bfe3658a4967b93126859a62a511a70e2. * some fix and tweaks * implement queue frontend (#1950) * implement queue frontend * fix types * fix ws endpoint in build mode * cleanup * Queue tweaks (#1909) * tweaks on estimation payload * Queue keep ws connections open (#1910) * 1. keep ws connections open after the event process is completed 2. do not send estimations periodically if live queue updates is open * fix calculation * 1. tweaks on event_queue * fix issue - create new ws for each request * format * fix * fix tests * fix tests * tets * test * changes * changes * changes * change' * wtf * changes * changes * file perms * Release queue beta v1 (#1971) * - release the new queue * - bypass the issue in the tests - rewrite the lost part in the codebase * - add concurrent queue example (#1978) * rank_eta calc * Queue fixes (#1981) * change * format * - comment out queue tests as they dont work well * - reformat * Update gradio/event_queue.py Co-authored-by: Ömer Faruk Özdemir <[email protected]> * changes * changes * change * weird fix Co-authored-by: Ömer Faruk Özdemir <[email protected]> * release-queue-v3 (#1988) * Fix frontend queuing to target secure WSS (#1996) * change * format * changes * queue-concurrency-tweaks (#2002) 1. make gather_data and broadcast_estimation sequential instead of concurrent because they were deleting elements at the same time and raising expections which was lowering the performance * Update Queue API, documentation (#2026) * changes * changes * fixes * changes * change * fix Co-authored-by: Ömer Faruk Özdemir <[email protected]> Co-authored-by: pngwn <[email protected]>
**n_whitespaces:** 65 · **n_ast_errors:** 0 · **d_id:** 43,215 · **ast_levels:** 12
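A self-contained sketch of the periodic-broadcast loop above; `STOP`, `UPDATE_INTERVALS`, and `EVENT_QUEUE` stand in for the class attributes the sample reads, and the demo stops itself after one round:

```python
import asyncio

class EventQueue:
    STOP = False
    UPDATE_INTERVALS = 0.01   # seconds between checks (assumed meaning)
    EVENT_QUEUE = ["event"]   # pending events

    @classmethod
    async def broadcast_estimations(cls):
        print("broadcasting to", len(cls.EVENT_QUEUE), "waiting client(s)")
        cls.STOP = True  # stop after one round, for the demo only

    @classmethod
    async def notify_clients(cls) -> None:
        while not cls.STOP:
            await asyncio.sleep(cls.UPDATE_INTERVALS)
            if cls.EVENT_QUEUE:
                await cls.broadcast_estimations()

asyncio.run(EventQueue.notify_clients())
```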
---
**complexity:** 3 · **n_identifiers:** 10
**code:**

```python
def scan_with_get_slave_id(self):
    # type: () -> List[XCPScannerResult]
    log_automotive.info("Start scan with GetSlaveId id in range: " + str(
        self.id_range))
    for identifier in self.id_range:
        ids = self._send_get_slave_id(identifier)
        if len(ids) > 0:
            return ids
    return []
```

**path:** `scapy/contrib/automotive/xcp/scanner.py` · **n_ast_nodes:** 81 · **repo:** scapy
**documentation:**
{ "docstring": "Starts the scan for XCP devices on CAN with the transport specific\n GetSlaveId Message", "language": "en", "n_whitespaces": 20, "n_words": 14, "vocab_size": 13 }
**n_words:** 33 · **language:** Python · **vocab_size:** 30
**commit_id:** `495b21f2867e48286767085c8cf2918e4092e9dc` · **file_name:** `scanner.py` · **id:** 209,602
**nloc:** 8 · **token_counts:** 47 · **fun_name:** `scan_with_get_slave_id`
**url:** https://github.com/secdev/scapy.git
**commit_message:**
Add Automotive Logger for all debug outputs of the automotive layer
**n_whitespaces:** 116 · **n_ast_errors:** 0 · **d_id:** 52,744 · **ast_levels:** 11
---
**complexity:** 7 · **n_identifiers:** 17
**code:**

```python
def all_view_models() -> List[Path]:
    file_list = []
    all_files = os.walk(base_path)
    for root, _, files in all_files:
        for filename in files:
            if filename.endswith(".py"):
                if "view" in filename or "model" in filename:
                    file_list.append(f"{root}/{filename}")
    clean_list = set(file_list)
    return [Path(x) for x in clean_list]
```

**path:** `openbb_terminal/core/scripts/sdk_audit.py` · **n_ast_nodes:** 137 · **repo:** OpenBBTerminal
**documentation:**
{ "docstring": "Geta all files with 'view' or 'model' in the name.\n\n Returns:\n ----------\n List[Path]\n All paths in openbb_terminal with 'view' or 'model' in the name\n ", "language": "en", "n_whitespaces": 43, "n_words": 24, "vocab_size": 17 }
**n_words:** 40 · **language:** Python · **vocab_size:** 30
**commit_id:** `963ca9b2b924d0514e0e65243dc8d9d7af023ad1` · **file_name:** `sdk_audit.py` · **id:** 286,656
**nloc:** 17 · **token_counts:** 77 · **fun_name:** `all_view_models`
**url:** https://github.com/OpenBB-finance/OpenBBTerminal.git
**commit_message:**
Audit SDK and View/Model functions (#3384) * Initial commit * Finalized functionality * update script * Allow using it without forecasting * Update gitignore * Update `sdk_audit.py` * Fixed issues, found more * Added fix for helper functions, and column for SDK type * Checked one more thing * Moved file * Move files ending with models/views * Added fix of name * Added file path fixes * Patch to fix sdk_audit for windows * fix Co-authored-by: Chavithra PARANA <[email protected]>
**n_whitespaces:** 110 · **n_ast_errors:** 0 · **d_id:** 85,961 · **ast_levels:** 17
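The sample depends on a module-level `base_path`; a generic sketch of the same walk-and-filter idea, parameterized on the root directory (function name and defaults are illustrative):

```python
import os
from pathlib import Path
from typing import List

def files_matching(root: str, needles=("view", "model")) -> List[Path]:
    # Walk root and keep unique .py files whose name contains any needle.
    hits = set()
    for dirpath, _, files in os.walk(root):
        for filename in files:
            if filename.endswith(".py") and any(n in filename for n in needles):
                hits.add(Path(dirpath) / filename)
    return sorted(hits)
```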
---
**complexity:** 7 · **n_identifiers:** 55
**code:**

```python
def _get_textdoc(self, index):
    assert self._opt is not None
    # FIXME we probably should do eliding here. See
    # qcommonstyle.cpp:viewItemDrawText
    # https://github.com/qutebrowser/qutebrowser/issues/118
    text_option = QTextOption()
    if self._opt.features & QStyleOptionViewItem.WrapText:
        text_option.setWrapMode(QTextOption.WordWrap)
    else:
        text_option.setWrapMode(QTextOption.ManualWrap)
    text_option.setTextDirection(self._opt.direction)
    text_option.setAlignment(QStyle.visualAlignment(
        self._opt.direction, self._opt.displayAlignment))

    if self._doc is not None:
        self._doc.deleteLater()
    self._doc = QTextDocument(self)
    self._doc.setDefaultFont(self._opt.font)
    self._doc.setDefaultTextOption(text_option)
    self._doc.setDocumentMargin(2)

    if index.parent().isValid():
        view = self.parent()
        assert isinstance(view, completionwidget.CompletionView), view
        pattern = view.pattern
        columns_to_filter = index.model().columns_to_filter(index)
        if index.column() in columns_to_filter and pattern:
            if self._opt.state & QStyle.State_Selected:
                color = config.val.colors.completion.item.selected.match.fg
            else:
                color = config.val.colors.completion.match.fg
            _Highlighter(self._doc, pattern, color)
        self._doc.setPlainText(self._opt.text)
    else:
        self._doc.setHtml(
            '<span style="font: {};">{}</span>'.format(
                html.escape(config.val.fonts.completion.category),
                html.escape(self._opt.text)))
```

**path:** `qutebrowser/completion/completiondelegate.py` · **n_ast_nodes:** 469 · **repo:** qutebrowser
**documentation:**
{ "docstring": "Create the QTextDocument of an item.\n\n Args:\n index: The QModelIndex of the item to draw.\n ", "language": "en", "n_whitespaces": 40, "n_words": 15, "vocab_size": 13 }
**n_words:** 90 · **language:** Python · **vocab_size:** 68
**commit_id:** `a20bb67a878b2e68abf8268c1b0a27f018d01352` · **file_name:** `completiondelegate.py` · **id:** 320,770
**nloc:** 33 · **token_counts:** 292 · **fun_name:** `_get_textdoc`
**url:** https://github.com/qutebrowser/qutebrowser.git
**commit_message:**
mypy: Upgrade to PyQt5-stubs 5.15.6.0 For some unknown reason, those new stubs cause a *lot* of things now to be checked by mypy which formerly probably got skipped due to Any being implied somewhere. The stubs themselves mainly improved, with a couple of regressions too. In total, there were some 337 (!) new mypy errors. This commit fixes almost all of them, and the next commit improves a fix to get things down to 0 errors again. Overview of the changes: ==== qutebrowser/app.py - Drop type ignore due to improved stubs. ==== qutebrowser/browser/browsertab.py - Specify the type of _widget members more closely than just QWidget. This is debatable: I suppose the abstract stuff shouldn't need to know anything about the concrete backends at all. But it seems like we cut some corners when initially implementing things, and put some code in browsertab.py just because the APIs of both backends happened to be compatible. Perhaps something to reconsider once we drop QtWebKit and hopefully implement a dummy backend. - Add an additional assertion in AbstractAction.run_string. This is already covered by the isinstance(member, self.action_base) above it, but that's too dynamic for mypy to understand. - Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x and y components), not a single int. - Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x and y components), not a single int. - Fix the argument types of AbstractScroller.to_perc, as it's possible to pass fractional percentages too. - Specify the type for AbstractHistoryPrivate._history. See above (_widget) re this being debatable. - Fix the return type of AbstractTabPrivate.event_target(), which can be None (see #3888). - Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS return value), not None. - Fix the argument type for AbstractTabPrivate.toggle_inspector: position can be None to use the last used position. - Declare the type of sub-objects of AbstractTab. - Fix the return value of AbstractTab.icon(), which is the QIcon, not None. ==== qutebrowser/browser/commands.py - Make sure the active window is a MainWindow (with a .win_id attribute). ==== qutebrowser/browser/downloadview.py - Add _model() which makes sure that self.model() is a DownloadModel, not None or any other model. This is needed because other methods access a variety of custom attributes on it, e.g. last_index(). ==== qutebrowser/browser/greasemonkey.py - Add an ignore for AbstractDownload.requested_url which we patch onto the downloads. Probably would be nicer to add it as a proper attribute which always gets set by the DownloadManager. ==== qutebrowser/browser/hints.py - Remove type ignores for QUrl.toString(). - Add a new type ignore for combining different URL flags (which works, but is not exactly type safe... still probably a regression in the stubs). - Make sure the things we get back from self._get_keyparser are what we actually expect. Probably should introduce a TypedDict (and/or overloads for _get_keyparser with typing.Literal) to teach mypy about the exact return value. See #7098. This is needed because we access Hint/NormalKeyParser-specific attributes such as .set_inhibited_timout() or .update_bindings(). ==== qutebrowser/browser/inspector.py - Similar changes than in browsertab.py to make some types where we share API (e.g. .setPage()) more concrete. Didn't work out unfortunately, see next commit. ==== qutebrowser/browser/network/pac.py - Remove now unneeded type ignore for signal. 
==== qutebrowser/browser/qtnetworkdownloads.py - Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an AbstractDownload), so that we can call ._uses_nam() on it. ==== qutebrowser/browser/qutescheme.py - Remove now unneeded type ignore for QUrl flags. ==== qutebrowser/browser/urlmarks.py - Specify the type of UrlMarkManager._lineparser, as those only get initialized in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist. ==== qutebrowser/browser/webelem.py - New casts to turn single KeyboardModifier (enum) entries into KeyboardModifiers (flags). Might not be needed anymore with Qt 6. - With that, casting the final value is now unneeded. ==== qutebrowser/browser/webengine/notification.py - Remove now unneeded type ignore for signal. - Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished() is a QProcess, not just any QObject. ==== qutebrowser/browser/webengine/webenginedownloads.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webengine/webengineelem.py - Specify the type of WebEngineElement._tab. - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webengineinspector.py - See changes to inspector.py and next commit. - Remove now unneeded type ignore for signal. ==== qutebrowser/browser/webengine/webenginequtescheme.py - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webenginesettings.py - Ignore access of .setter attribute which we patch onto QWebEngineProfile. Would be nice to have a subclass or wrapper-class instead. ==== qutebrowser/browser/webengine/webenginetab.py - Specified the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Remove some now-unneeded type ignores for creating FindFlags. - Specify more concrete types for WebEngineTab members where we actually need to access WebEngine-specific attributes. - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webengine/webview.py - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webkit/network/networkreply.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webkit/webkitinspector.py - See changes to inspector.py and next commit. ==== qutebrowser/browser/webkit/webkittab.py - Specify the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Add a type ignore for WebKitAction because our workaround needs to treat them as ints (which is allowed by PyQt, even if not type-safe). - Add new ignores for findText calls: The text is a QString and can be None; the flags are valid despite mypy thinking they aren't (stubs regression?). - Specify the type for WebKitHistoryPrivate._history, because we access WebKit-specific attributes. See above (_widget) re this being debatable. - Make mypy aware that .currentFrame() and .frameAt() can return None (stubs regression?). - Make sure the .page() and .page().networkAccessManager() are our subclasses rather than the more generic QtWebKit objects, as we use custom attributes. - Add new type ignores for signals (stubs regression!) 
==== qutebrowser/browser/webkit/webpage.py - Make sure the .networkAccessManager() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. - Replace a cast by a type ignore. The cast didn't work anymore. ==== qutebrowser/browser/webkit/webview.py - Make sure the .page() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. ==== qutebrowser/commands/userscripts.py - Remove now unneeded type ignore for signal. ==== qutebrowser/completion/completer.py - Add a new _completion() getter (which ensures it actually gets the completion view) rather than accessing the .parent() directly (which could be any QObject). ==== qutebrowser/completion/completiondelegate.py - Make sure self.parent() is a CompletionView (no helper method as there is only one instance). - Remove a now-unneeded type ignore for adding QSizes. ==== qutebrowser/completion/completionwidget.py - Add a ._model() getter which ensures that we get a CompletionModel (with custom attributes) rather than Qt's .model() which can be any QAbstractItemModel (or None). - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/completion/models/completionmodel.py - Remove now unneeded type ignores for signals. - Ignore a complaint about .set_pattern() not being defined. Completion categories don't share any common parent class, so it would be good to introduce a typing.Protocol for this. See #7098. ==== qutebrowser/components/misccommands.py - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/components/readlinecommands.py - Make sure QApplication.instance() is a QApplication (and not just a QCoreApplication). This includes the former "not None" check. ==== qutebrowser/components/scrollcommands.py - Add basic annotation for "funcs" dict. Could have a callable protocol to specify it needs a count kwarg, see #7098. ==== qutebrowser/config/stylesheet.py - Correctly specify that stylesheet apply to QWidgets, not any QObject. - Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy about this with overloads and protocols (stylesheet for set_register being None => STYLESHEET needs to be defined, otherwise anything goes), but perhaps not worth the troble. See #7098. ==== qutebrowser/keyinput/keyutils.py - Remove some now-unneeded type ignores and add a cast for using a single enum value as flags. Might need to look at this again with Qt 6 support. ==== qutebrowser/keyinput/modeman.py - Add a FIXME for using a TypedDict, see comments for hints.py above. ==== qutebrowser/mainwindow/mainwindow.py - Remove now-unneeded type ignores for calling with OR-ed flags. - Improve where we cast from WindowType to WindowFlags, no int needed - Use new .tab_bar() getter, see below. ==== qutebrowser/mainwindow/prompt.py - Remove now-unneeded type ignores for calling with OR-ed flags. ==== qutebrowser/mainwindow/statusbar/bar.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/statusbar/command.py - Fix type for setText() override (from QLineEdit): text can be None (QString in C++). ==== qutebrowser/mainwindow/statusbar/url.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/tabbedbrowser.py - Specify that TabDeque manages browser tabs, not any QWidgets. It accesses AbstractTab-specific attributes. - Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access .maybe_hide. 
- Fix the annotations for stored marks: Scroll positions are a QPoint, not int. - Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and .widget(), which ensures that the return values are valid AbstractTabs (or None for _tab_by_idx). This is needed because we access AbstractTab-specific attributes. - For some places, where the tab can be None, continue using .currentTab() but add asserts. - Remove some now-unneeded [unreachable] ignores, as mypy knows about the None possibility now. ==== qutebrowser/mainwindow/tabwidget.py - Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and .widget() are of type TabBar and AbstractTab, respectively. - Add additional assertions where we expect ._tab_by_idx() to never be None. - Remove dead code in get_tab_fields for handling a None y scroll position. I was unable to find any place in the code where this could be set to None. - Remove some now-unneeded type ignores and casts, as mypy now knows that _type_by_idx() could be None. - Work around a strange instance where mypy complains about not being able to find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility, despite it clearly being shown as a bool *inside* that class without any annotation. - Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in fact a TabWidget. ==== qutebrowser/misc/crashsignal.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/editor.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/ipc.py - Remove now unneeded type ignores for signals. - Add new type ignores for .error() which is both a signal and a getter (stub regression?). Won't be relevant for Qt 6 anymore, as the signal was renamed to errorOccurred in 5.15. ==== qutebrowser/misc/objects.py - Make sure mypy knows that objects.app is our custom Application (with custom attributes) rather than any QApplication. ==== qutebrowser/utils/objreg.py - Ignore attr-defined for .win_id attributes. Maybe could add a typing.Protocol, but ideally, the whole objreg stuff should die one day anyways. ==== tests/unit/completion/test_completer.py - Make CompletionWidgetStub inherit from CompletionView so that it passes the new isinstance() asserts in completer.py (see above).
**n_whitespaces:** 466 · **n_ast_errors:** 0 · **d_id:** 117,338 · **ast_levels:** 19
---
**complexity:** 1 · **n_identifiers:** 4
**code:**

```python
def role(self) -> 'PeaRoleType':
    return self.args.pea_role
```

**path:** `jina/peapods/peas/__init__.py` · **n_ast_nodes:** 27 · **repo:** jina
**documentation:**
{ "docstring": "Get the role of this pea in a pod\n .. #noqa: DAR201", "language": "en", "n_whitespaces": 18, "n_words": 12, "vocab_size": 12 }
**n_words:** 6 · **language:** Python · **vocab_size:** 6
**commit_id:** `933415bfa1f9eb89f935037014dfed816eb9815d` · **file_name:** `__init__.py` · **id:** 9,851
**nloc:** 4 · **token_counts:** 14 · **fun_name:** `role`
**url:** https://github.com/jina-ai/jina.git
**commit_message:**
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests 
with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" 
This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request 
with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
**n_whitespaces:** 20 · **n_ast_errors:** 0 · **d_id:** 1,730 · **ast_levels:** 7
---
**complexity:** 1 · **n_identifiers:** 7
**code:**

```python
def __reduce__(self):
    (serialized, _) = self._serialization_helper()
    # There is no outer object ref when the actor handle is
    # deserialized out-of-band using pickle.
    return ActorHandle._deserialization_helper, (serialized, None)
```

**path:** `python/ray/actor.py` · **n_ast_nodes:** 46 · **repo:** ray
**documentation:**
{ "docstring": "This code path is used by pickling but not by Ray forking.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
**n_words:** 27 · **language:** Python · **vocab_size:** 24
**commit_id:** `f084546d41f0533c1e9e96a7249532d0eb4ff47d` · **file_name:** `actor.py` · **id:** 126,889
**nloc:** 3 · **token_counts:** 27 · **fun_name:** `__reduce__`
**url:** https://github.com/ray-project/ray.git
**commit_message:**
Fix out-of-band deserialization of actor handle (#27700) When we deserialize actor handle via pickle, we will register it with an outer object ref equaling to itself which is wrong. For out-of-band deserialization, there should be no outer object ref. Signed-off-by: Jiajun Yao <[email protected]>
**n_whitespaces:** 62 · **n_ast_errors:** 0 · **d_id:** 28,292 · **ast_levels:** 8
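A minimal sketch of the `__reduce__` contract the sample relies on: return a callable and an argument tuple, and pickle rebuilds the object as `callable(*args)`:

```python
import pickle

def _rebuild(state):
    # Module-level so pickle can import it by name.
    return Handle(state)

class Handle:
    def __init__(self, state):
        self.state = state

    def __reduce__(self):
        return _rebuild, (self.state,)

restored = pickle.loads(pickle.dumps(Handle("abc")))
assert restored.state == "abc"
```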
---
**complexity:** 11 · **n_identifiers:** 20
**code:**

```python
def validate_parameter_constraints(parameter_constraints, params, caller_name):
    if len(set(parameter_constraints) - set(params)) != 0:
        raise ValueError(
            f"The parameter constraints {list(parameter_constraints)}"
            " contain unexpected parameters"
            f" {set(parameter_constraints) - set(params)}"
        )

    for param_name, param_val in params.items():
        # We allow parameters to not have a constraint so that third party estimators
        # can inherit from sklearn estimators without having to necessarily use the
        # validation tools.
        if param_name not in parameter_constraints:
            continue

        constraints = parameter_constraints[param_name]

        if constraints == "no_validation":
            continue

        constraints = [make_constraint(constraint) for constraint in constraints]

        for constraint in constraints:
            if constraint.is_satisfied_by(param_val):
                # this constraint is satisfied, no need to check further.
                break
        else:
            # No constraint is satisfied, raise with an informative message.

            # Ignore constraints that we don't want to expose in the error message,
            # i.e. options that are for internal purpose or not officially supported.
            constraints = [
                constraint for constraint in constraints if not constraint.hidden
            ]

            if len(constraints) == 1:
                constraints_str = f"{constraints[0]}"
            else:
                constraints_str = (
                    f"{', '.join([str(c) for c in constraints[:-1]])} or"
                    f" {constraints[-1]}"
                )

            raise ValueError(
                f"The {param_name!r} parameter of {caller_name} must be"
                f" {constraints_str}. Got {param_val!r} instead."
            )
```

**path:** `sklearn/utils/_param_validation.py` · **n_ast_nodes:** 330 · **repo:** scikit-learn
**documentation:**
{ "docstring": "Validate types and values of given parameters.\n\n Parameters\n ----------\n parameter_constraints : dict or {\"no_validation\"}\n If \"no_validation\", validation is skipped for this parameter.\n\n If a dict, it must be a dictionary `param_name: list of constraints`.\n A parameter is valid if it satisfies one of the constraints from the list.\n Constraints can be:\n - an Interval object, representing a continuous or discrete range of numbers\n - the string \"array-like\"\n - the string \"sparse matrix\"\n - the string \"random_state\"\n - callable\n - None, meaning that None is a valid value for the parameter\n - any type, meaning that any instance of this type is valid\n - a StrOptions object, representing a set of strings\n - the string \"boolean\"\n - the string \"verbose\"\n\n params : dict\n A dictionary `param_name: param_value`. The parameters to validate against the\n constraints.\n\n caller_name : str\n The name of the estimator or function or method that called this function.\n ", "language": "en", "n_whitespaces": 286, "n_words": 149, "vocab_size": 89 }
181
Python
117
91f02270a8f49e3e52882dc0fa634eff4d138fc8
_param_validation.py
260,513
32
137
validate_parameter_constraints
https://github.com/scikit-learn/scikit-learn.git
MAINT Add one-sided set differences for clarity in param validation (#23772) Co-authored-by: Jérémie du Boisberranger <[email protected]>
590
0
76,307
25
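For context on how a constraints table like the one in this record is consumed: a value passes if it satisfies at least one of its listed constraints. The `is_satisfied` helper below is a simplified stand-in written for illustration, not scikit-learn's actual `make_constraint` machinery.

# Hypothetical, self-contained sketch of the pattern above: each parameter
# maps to a list of acceptable constraints, and a value is valid if it
# satisfies at least one of them.
def is_satisfied(constraint, value):
    if constraint is None:
        return value is None
    if isinstance(constraint, type):
        return isinstance(value, constraint)
    return False  # simplified: real code also handles intervals, options, etc.

parameter_constraints = {"n_clusters": [int], "random_state": [int, None]}
params = {"n_clusters": 8, "random_state": None}

for name, value in params.items():
    if not any(is_satisfied(c, value) for c in parameter_constraints[name]):
        raise ValueError(f"The {name!r} parameter must match one constraint.")
print("all parameters valid")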
4
10
def from_bytes(b):
    if len(b) == 4:
        seconds = struct.unpack("!L", b)[0]
        nanoseconds = 0
    elif len(b) == 8:
        data64 = struct.unpack("!Q", b)[0]
        seconds = data64 & 0x00000003FFFFFFFF
        nanoseconds = data64 >> 34
    elif len(b) == 12:
        nanoseconds, seconds = struct.unpack("!Iq", b)
    else:
        raise ValueError(
            "Timestamp type can only be created from 32, 64, or 96-bit byte objects"
        )
    return Timestamp(seconds, nanoseconds)
.venv/lib/python3.8/site-packages/pip/_vendor/msgpack/ext.py
159
transferlearning
{ "docstring": "Unpack bytes into a `Timestamp` object.\n\n Used for pure-Python msgpack unpacking.\n\n :param b: Payload from msgpack ext message with code -1\n :type b: bytes\n\n :returns: Timestamp object unpacked from msgpack ext payload\n :rtype: Timestamp\n ", "language": "en", "n_whitespaces": 76, "n_words": 34, "vocab_size": 27 }
60
Python
44
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
ext.py
62,784
15
96
from_bytes
https://github.com/jindongwang/transferlearning.git
upd; format
205
0
13,038
13
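The 64-bit branch of `from_bytes` above packs nanoseconds into the top 30 bits and seconds into the bottom 34. A minimal round-trip sketch using only the stdlib `struct` module:

import struct

# Round-trip the 64-bit layout that from_bytes() decodes above.
seconds, nanoseconds = 1_600_000_000, 500
data64 = (nanoseconds << 34) | seconds
payload = struct.pack("!Q", data64)

unpacked = struct.unpack("!Q", payload)[0]
assert unpacked & 0x00000003FFFFFFFF == seconds
assert unpacked >> 34 == nanoseconds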
2
9
def do_patch() -> None:
    from synapse.logging.context import current_context

    global _already_patched

    orig_inline_callbacks = defer.inlineCallbacks
    if _already_patched:
        return
synapse/util/patch_inline_callbacks.py
44
synapse
{ "docstring": "\n Patch defer.inlineCallbacks so that it checks the state of the logcontext on exit\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 12 }
16
Python
16
fa0eab9c8e159b698a31fc7cfaafed643f47e284
patch_inline_callbacks.py
248,214
12
36
do_patch
https://github.com/matrix-org/synapse.git
Use `ParamSpec` in a few places (#12667)
38
0
72,158
7
3
11
def __rsub__(self, other):
    if isinstance(other, str_type):
        other = self._literalStringClass(other)
    if not isinstance(other, ParserElement):
        raise TypeError(
            "Cannot combine element of type {} with ParserElement".format(
                type(other).__name__
            )
        )
    return other - self
pipenv/patched/notpip/_vendor/pyparsing/core.py
86
pipenv
{ "docstring": "\n Implementation of ``-`` operator when left operand is not a :class:`ParserElement`\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
30
Python
26
f3166e673fe8d40277b804d35d77dcdb760fc3b3
core.py
20,553
10
52
__rsub__
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
140
0
3,422
14
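The reflected operator above is what lets a plain string appear on the left of `-` in a parser expression. A toy class (hypothetical, not pyparsing's) mirroring the same pattern:

# Python calls __rsub__ on the right operand when the left one is a str,
# which has no __sub__ accepting Elem.
class Elem:
    def __init__(self, name):
        self.name = name
    def __sub__(self, other):
        return f"({self.name} - {other.name})"
    def __rsub__(self, other):
        if isinstance(other, str):
            other = Elem(other)  # coerce, as the pyparsing code does
        if not isinstance(other, Elem):
            raise TypeError("Cannot combine element with Elem")
        return other - self      # delegate back to __sub__

print("prefix" - Elem("body"))  # -> (prefix - body)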
8
32
def _get_categorical_mapping(self, scale, data):
    levels = categorical_order(data, scale.order)
    n = len(levels)
    values = scale.values

    if isinstance(values, dict):
        self._check_dict_entries(levels, values)
        # TODO where to ensure that dict values have consistent representation?
        colors = [values[x] for x in levels]
    elif isinstance(values, list):
        colors = self._check_list_length(levels, scale.values)
    elif isinstance(values, tuple):
        colors = blend_palette(values, n)
    elif isinstance(values, str):
        colors = color_palette(values, n)
    elif values is None:
        if n <= len(get_color_cycle()):
            # Use current (global) default palette
            colors = color_palette(n_colors=n)
        else:
            colors = color_palette("husl", n)
    else:
        scale_class = scale.__class__.__name__
        msg = " ".join([
            f"Scale values for {self.variable} with a {scale_class} mapping",
            f"must be string, list, tuple, or dict; not {type(scale.values)}."
        ])
        raise TypeError(msg)

    # If color specified here has alpha channel, it will override alpha property
    colors = self._standardize_colors(colors)
seaborn/_core/properties.py
311
seaborn
{ "docstring": "Define mapping as lookup in list of discrete color values.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
124
Python
89
a07ef69882ed76e09a0ed43d6f3ea33780c1b2be
properties.py
41,391
28
184
_get_categorical_mapping
https://github.com/mwaskom/seaborn.git
Transition mappings->properties, leaving a few loose ends
415
0
7,413
17
1
59
def generate_random_string():
    import random
    import string

    return "".join(random.choices(string.ascii_uppercase + string.digits, k=8))

random_string = generate_random_string()

# [START create_queue]
create_queue = CloudTasksQueueCreateOperator(
    location=LOCATION,
    task_queue=Queue(stackdriver_logging_config=dict(sampling_ratio=0.5)),
    queue_name=QUEUE_ID + "{{ task_instance.xcom_pull(task_ids='random_string') }}",
    retry=Retry(maximum=10.0),
    timeout=5,
    task_id="create_queue",
)
# [END create_queue]

# [START delete_queue]
delete_queue = CloudTasksQueueDeleteOperator(
    location=LOCATION,
    queue_name=QUEUE_ID + "{{ task_instance.xcom_pull(task_ids='random_string') }}",
    task_id="delete_queue",
)
# [END delete_queue]
delete_queue.trigger_rule = TriggerRule.ALL_DONE

# [START resume_queue]
resume_queue = CloudTasksQueueResumeOperator(
    location=LOCATION,
    queue_name=QUEUE_ID + "{{ task_instance.xcom_pull(task_ids='random_string') }}",
    task_id="resume_queue",
)
# [END resume_queue]

# [START pause_queue]
pause_queue = CloudTasksQueuePauseOperator(
    location=LOCATION,
    queue_name=QUEUE_ID + "{{ task_instance.xcom_pull(task_ids='random_string') }}",
    task_id="pause_queue",
)
# [END pause_queue]

# [START purge_queue]
purge_queue = CloudTasksQueuePurgeOperator(
    location=LOCATION,
    queue_name=QUEUE_ID + "{{ task_instance.xcom_pull(task_ids='random_string') }}",
    task_id="purge_queue",
)
# [END purge_queue]

# [START get_queue]
get_queue = CloudTasksQueueGetOperator(
    location=LOCATION,
    queue_name=QUEUE_ID + "{{ task_instance.xcom_pull(task_ids='random_string') }}",
    task_id="get_queue",
)

get_queue_result = BashOperator(
    task_id="get_queue_result",
    bash_command=f"echo {get_queue.output}",
)
# [END get_queue]

# [START update_queue]
update_queue = CloudTasksQueueUpdateOperator(
    task_queue=Queue(stackdriver_logging_config=dict(sampling_ratio=1)),
    location=LOCATION,
    queue_name=QUEUE_ID + "{{ task_instance.xcom_pull(task_ids='random_string') }}",
    update_mask=FieldMask(paths=["stackdriver_logging_config.sampling_ratio"]),
    task_id="update_queue",
)
# [END update_queue]

# [START list_queue]
list_queue = CloudTasksQueuesListOperator(location=LOCATION, task_id="list_queue")
# [END list_queue]

chain(
    random_string,
    create_queue,
    update_queue,
    pause_queue,
    resume_queue,
    purge_queue,
    get_queue,
    get_queue_result,
    list_queue,
    delete_queue,
)

from tests.system.utils.watcher import watcher

# This test needs watcher in order to properly mark success/failure
# when "tearDown" task with trigger rule is part of the DAG
list(dag.tasks) >> watcher()

from tests.system.utils import get_test_run  # noqa: E402

# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
tests/system/providers/google/tasks/example_queue.py
517
airflow
{ "docstring": "\n Generate random string for queue and task names.\n Queue name cannot be repeated in preceding 7 days and\n task name in the last 1 hour.\n ", "language": "en", "n_whitespaces": 54, "n_words": 25, "vocab_size": 21 }
221
Python
115
3977e1798d8294ba628b5f330f43702c1a5c79fc
example_queue.py
48,117
4
31
generate_random_string
https://github.com/apache/airflow.git
CloudTasks assets & system tests migration (AIP-47) (#23282)
636
0
9,364
13
1
3
def test_resource_requirements_none(mock_get_all_node_ids, mock_deployment_state):
python/ray/serve/tests/test_deployment_state.py
15
ray
{ "docstring": "Ensure resource_requirements doesn't break if a requirement is None", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
3
Python
3
65d0c0aa48be8f9f7faae857d3ab71444997755a
test_deployment_state.py
128,270
7
52
test_resource_requirements_none
https://github.com/ray-project/ray.git
[Serve] add alpha gRPC support (#28175)
6
0
28,652
6
1
41
def host_local_array_to_global_array(local_inputs, global_mesh, pspecs):
    def _convert(arr, pspec):
        if isinstance(arr, array.ArrayImpl) and isinstance(arr.sharding, PmapSharding):
            arr = np.array(arr)
        local_sharding = MeshPspecSharding(global_mesh.local_mesh, pspec)
        arrays = [
            device_put(arr[index], d)
            for d, index in local_sharding.devices_indices_map(arr.shape).items()
        ]
        global_aval = global_mesh._local_to_global(
            pxla._get_array_mapping(pspec),
            core.ShapedArray(arr.shape, arrays[0].dtype))
        return array.ArrayImpl(global_aval, MeshPspecSharding(global_mesh, pspec),
                               arrays, committed=True)

    flattened_inps, in_tree = tree_flatten(local_inputs)
    in_pspecs = flatten_axis_resources(
        'input pspecs', in_tree, pspecs, tupled_args=True)
    out = tree_map(_convert, tuple(flattened_inps), in_pspecs)
    return tree_unflatten(in_tree, out)
jax/experimental/pjit.py
262
jax
{ "docstring": "Converts a host local value to a globally sharded `jax.Array`.\n\n You can use this function to transition to `jax.Array`. Using `jax.Array` with\n `pjit` has the same semantics of using GDA with pjit i.e. all `jax.Array`\n inputs to pjit should be globally shaped.\n\n If you are currently passing host local values to pjit, you can use this\n function to convert your host local values to global Arrays and then pass that\n to pjit.\n\n Example usage:\n\n ```\n global_inputs = jax.experimental.pjit.host_local_array_to_global_array(\n host_local_inputs, global_mesh, in_pspecs)\n\n with mesh:\n global_out = pjitted_fun(global_inputs)\n\n host_local_output = jax.experimental.pjit.global_array_to_host_local_array(\n global_out, mesh, out_pspecs)\n ```\n\n Args:\n local_inputs: A Pytree of host local values.\n global_mesh: The global mesh.\n pspecs: A Pytree of PartitionSpecs.\n ", "language": "en", "n_whitespaces": 142, "n_words": 110, "vocab_size": 76 }
63
Python
55
4da72cf3988b4918f65b1401e46c40b7c4504963
pjit.py
122,219
7
54
host_local_array_to_global_array
https://github.com/google/jax.git
Add `host_local_array_to_global_array` and `global_array_to_host_local_array` for enabling transition to jax.Array. Also support `FROM_GDA` for `jax.Array` as a backwards compatible change so that users can continue to use that until they transition to jax.Array. Its currently required because of usage like `in_axis_resources = (FROM_GDA, FROM_GDA, P('data'), None)` and changing this on users side will require input from users so we as JAX can just support it as a temporary thing since GDA and Array act similarly in pjit. PiperOrigin-RevId: 479035326
151
0
27,124
15
1
5
def telemetry_write_key() -> Optional[Text]:
    return _fetch_write_key("segment", TELEMETRY_WRITE_KEY_ENVIRONMENT_VARIABLE)
rasa/telemetry.py
31
rasa
{ "docstring": "Read the Segment write key from the segment key text file.\n\n The segment key text file should by present only in wheel/sdist packaged\n versions of Rasa Open Source. This avoids running telemetry locally when\n developing on Rasa or when running CI builds.\n\n In local development, this should always return `None` to avoid logging telemetry.\n\n Returns:\n Segment write key, if the key file was present.\n ", "language": "en", "n_whitespaces": 89, "n_words": 64, "vocab_size": 50 }
7
Python
7
6339856514897056716bb531acb8489c9cf05d26
telemetry.py
159,329
13
17
telemetry_write_key
https://github.com/RasaHQ/rasa.git
Add support for different recipes (#10641) * Add support for different recipes Fixes https://github.com/RasaHQ/rasa/issues/10473 * Update docs/docs/graph-recipe.mdx Co-authored-by: Joe Juzl <[email protected]>
13
0
38,201
8
5
33
def testClusterAutoscaling(self):
    self.cluster.update_config(
        {
            "provider": {"head_resources": {"CPU": 4, "GPU": 0}},
        }
    )
    self.cluster.start()
    self.cluster.connect(client=True, timeout=120)

    self.assertGreater(ray.cluster_resources().get("CPU", 0), 0)

    # Trigger autoscaling
    pg = ray.util.placement_group([{"CPU": 1, "GPU": 1}] * 2)
    timeout = time.monotonic() + 120
    while ray.cluster_resources().get("GPU", 0) < 2:
        if time.monotonic() > timeout:
            raise RuntimeError("Autoscaling failed or too slow.")
        time.sleep(1)

    # Schedule task with resources
    self.assertEquals(
        5,
        ray.get(
            remote_task.options(
                num_cpus=1,
                num_gpus=1,
                scheduling_strategy=PlacementGroupSchedulingStrategy(
                    placement_group=pg
                ),
            ).remote(5)
        ),
    )

    print("Autoscaling worked")

    ray.util.remove_placement_group(pg)

    time.sleep(2)  # Give some time so nodes.json is updated

    self.cluster.kill_node(num=2)
    print("Killed GPU node.")

    pg = ray.util.placement_group([{"CPU": 1, "GPU": 1}] * 2)

    table = ray.util.placement_group_table(pg)
    assert table["state"] == "PENDING"

    timeout = time.monotonic() + 180
    while table["state"] != "CREATED":
        if time.monotonic() > timeout:
            raise RuntimeError("Re-starting killed node failed or too slow.")
        time.sleep(1)
        table = ray.util.placement_group_table(pg)

    print("Node was restarted.")
python/ray/tune/tests/test_multinode_sync.py
513
ray
{ "docstring": "Sanity check that multinode tests with autoscaling are working", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
126
Python
90
57cdbb1769a9c32972ba0ec9e7e857eeea961869
test_multinode_sync.py
127,553
42
300
testClusterAutoscaling
https://github.com/ray-project/ray.git
Migrate the deprecated placement_group option to PlacementGroupSchedulingStrategy (#28437) placement_group option is deprecated, use PlacementGroupSchedulingStrategy instead.
579
0
28,467
17
10
19
def _from_module(self, module, object):
    if module is None:
        return True
    elif inspect.getmodule(object) is not None:
        return module is inspect.getmodule(object)
    elif inspect.isfunction(object):
        return module.__dict__ is object.__globals__
    elif inspect.ismethoddescriptor(object):
        if hasattr(object, '__objclass__'):
            obj_mod = object.__objclass__.__module__
        elif hasattr(object, '__module__'):
            obj_mod = object.__module__
        else:
            return True  # [XX] no easy way to tell otherwise
        return module.__name__ == obj_mod
    elif inspect.isclass(object):
        return module.__name__ == object.__module__
    elif hasattr(object, '__module__'):
        return module.__name__ == object.__module__
    elif isinstance(object, property):
        return True  # [XX] no way not be sure.
    else:
        raise ValueError("object must be a class or function")
python3.10.4/Lib/doctest.py
242
XX-Net
{ "docstring": "\n Return true if the given object is defined in the given\n module.\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 10 }
88
Python
47
8198943edd73a363c266633e1aa5b2a9e9c9f526
doctest.py
223,483
23
148
_from_module
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
317
0
56,928
13
2
4
def add_prefix(self, field_name):
    return "%s-%s" % (self.prefix, field_name) if self.prefix else field_name
django/forms/forms.py
39
django
{ "docstring": "\n Return the field name with a prefix appended, if this Form has a\n prefix set.\n\n Subclasses may wish to override.\n ", "language": "en", "n_whitespaces": 49, "n_words": 20, "vocab_size": 18 }
12
Python
12
9c19aff7c7561e3a82978a272ecdaad40dda5c00
forms.py
205,963
2
24
add_prefix
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
26
0
51,300
9
3
10
def arc_tangent(value, default=_SENTINEL):
    try:
        return math.atan(float(value))
    except (ValueError, TypeError):
        if default is _SENTINEL:
            raise_no_default("atan", value)
        return default
homeassistant/helpers/template.py
70
core
{ "docstring": "Filter and function to get arc tangent of the value.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
17
Python
15
4885331509eeffe50f42d76b234996467b06170f
template.py
300,618
7
42
arc_tangent
https://github.com/home-assistant/core.git
Fail template functions when no default specified (#71687)
58
0
99,478
13
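A self-contained sketch of the sentinel-default pattern used by `arc_tangent` above; the `_SENTINEL` object and the `ValueError` fallback here are stand-ins for Home Assistant's module-level helpers, not its actual `raise_no_default`:

import math

_SENTINEL = object()  # unique marker: "no default was supplied"

def arc_tangent(value, default=_SENTINEL):
    try:
        return math.atan(float(value))
    except (ValueError, TypeError):
        if default is _SENTINEL:
            raise ValueError(f"atan got invalid input: {value!r}") from None
        return default

print(arc_tangent(1.0))          # 0.7853981633974483
print(arc_tangent("oops", 0.0))  # falls back to the supplied default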
1
3
def master_target(self):
    return self._master_target
keras/distribute/distribute_coordinator_utils.py
19
keras
{ "docstring": "Returns the session master for the corresponding task to connect to.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
4
Python
4
84afc5193d38057e2e2badf9c889ea87d80d8fbf
distribute_coordinator_utils.py
270,214
2
10
master_target
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
18
0
80,417
6
1
5
def new(key, msg=None, digestmod=''):
    return HMAC(key, msg, digestmod)
python3.10.4/Lib/hmac.py
37
XX-Net
{ "docstring": "Create a new hashing object and return it.\n\n key: bytes or buffer, The starting key for the hash.\n msg: bytes or buffer, Initial input for the hash, or None.\n digestmod: A hash name suitable for hashlib.new(). *OR*\n A hashlib constructor returning a new hash object. *OR*\n A module supporting PEP 247.\n\n Required as of 3.8, despite its position after the optional\n msg argument. Passing it as a keyword argument is\n recommended, though not required for legacy API reasons.\n\n You can now feed arbitrary bytes into the object using its update()\n method, and can ask for the hash value at any time by calling its digest()\n or hexdigest() methods.\n ", "language": "en", "n_whitespaces": 200, "n_words": 108, "vocab_size": 80 }
8
Python
8
8198943edd73a363c266633e1aa5b2a9e9c9f526
hmac.py
217,657
2
23
new
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
14
0
54,872
7
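Typical usage of the stdlib API documented above, with `digestmod` passed as a keyword as the docstring recommends:

import hmac
import hashlib

mac = hmac.new(b"secret-key", msg=b"payload", digestmod=hashlib.sha256)
mac.update(b" more data")
print(mac.hexdigest())

# Constant-time comparison is the right way to verify a received MAC.
expected = hmac.new(b"secret-key", b"payload more data", hashlib.sha256).digest()
assert hmac.compare_digest(mac.digest(), expected)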
1
23
def test_websocket_connect(self):
    with patch(
        "streamlit.server.server.LocalSourcesWatcher"
    ), self._patch_app_session():
        yield self.start_server_loop()

        self.assertFalse(self.server.browser_is_connected)

        # Open a websocket connection
        ws_client = yield self.ws_connect()
        self.assertTrue(self.server.browser_is_connected)

        # Get this client's SessionInfo object
        self.assertEqual(1, len(self.server._session_info_by_id))
        session_info = list(self.server._session_info_by_id.values())[0]

        # Close the connection
        ws_client.close()
        yield gen.sleep(0.1)
        self.assertFalse(self.server.browser_is_connected)

        # Ensure AppSession.shutdown() was called, and that our
        # SessionInfo was cleared.
        session_info.session.shutdown.assert_called_once()
        self.assertEqual(0, len(self.server._session_info_by_id))
lib/tests/streamlit/server_test.py
224
streamlit
{ "docstring": "Test that we can connect to the server via websocket.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
54
Python
42
704eab3478cf69847825b23dabf15813a8ac9fa2
server_test.py
118,640
15
132
test_websocket_connect
https://github.com/streamlit/streamlit.git
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
262
0
26,342
15
5
36
def call_find(self, other_args):
    parser = argparse.ArgumentParser(
        prog="find",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description=,
    )
    parser.add_argument(
        "-c",
        "--coin",
        help="Symbol Name or Id of Coin",
        dest="coin",
        required="-h" not in other_args,
        type=str,
    )
    parser.add_argument(
        "-k",
        "--key",
        dest="key",
        help="Specify by which column you would like to search: symbol, name, id",
        type=str,
        choices=FIND_KEYS,
        default="symbol",
    )
    parser.add_argument(
        "-l",
        "--limit",
        default=10,
        dest="limit",
        help="Number of records to display",
        type=check_positive,
    )
    parser.add_argument(
        "-s",
        "--skip",
        default=0,
        dest="skip",
        help="Skip n of records",
        type=check_positive,
    )
    if other_args and not other_args[0][0] == "-":
        other_args.insert(0, "-c")
    ns_parser = self.parse_known_args_and_warn(
        parser,
        other_args,
        EXPORT_ONLY_RAW_DATA_ALLOWED,
    )
    # TODO: merge find + display_all_coins
    if ns_parser:
        if ns_parser.coin == "ALL":
            display_all_coins(
                symbol=ns_parser.coin,
                source=ns_parser.source,
                limit=ns_parser.limit,
                skip=ns_parser.skip,
                show_all=True,
                export=ns_parser.export,
            )
        else:
            find(
                query=ns_parser.coin,
                source=ns_parser.source,
                key=ns_parser.key,
                limit=ns_parser.limit,
                export=ns_parser.export,
            )
openbb_terminal/cryptocurrency/crypto_controller.py
406
OpenBBTerminal
{ "docstring": "Process find command\n Find similar coin by name, symbol, or id. If you don't remember exact name or id of the Coin at CoinGecko,\n Binance, Coinbase or CoinPaprika you can use this command to display coins with similar name, symbol or id\n to your search query.\n Example of usage: coin name is something like \"polka\". So I can try: find -c polka -k name -t 25\n It will search for coin that has similar name to polka and display top 25 matches.\n -c, --coin stands for coin - you provide here your search query\n -k, --key it's a searching key. You can search by symbol, id or name of coin\n -l, --limit it displays top N number of records.\n coins: Shows list of coins available on CoinGecko, CoinPaprika and Binance.If you provide name of\n coin then in result you will see ids of coins with best match for all mentioned services.\n If you provide \"ALL\" in your coin search query, then all coins will be displayed. To move over coins you\n can use pagination mechanism with skip, top params. E.g. coins ALL --skip 100 --limit 30 then all coins\n from 100 to 130 will be displayed. By default skip = 0, limit = 10.\n If you won't provide source of the data everything will be displayed (CoinGecko, CoinPaprika, Binance).\n If you want to search only in given source then use --source flag. E.g. if you want to find coin with name\n uniswap on CoinPaprika then use: coins uniswap --source cp --limit 10\n ", "language": "en", "n_whitespaces": 439, "n_words": 252, "vocab_size": 139 }
114
Python
90
09f753da1c2a2f03c41fe6a3ca2eb79f6ea58995
crypto_controller.py
286,398
82
257
call_find
https://github.com/OpenBB-finance/OpenBBTerminal.git
More Fixes to Crypto + key sort (#3244) * fix #3095 - autocomplete and command working + key sort * fix #3056 * fix [Bug] bugs #3048 * fix [Bug] bug #3017 * sort -> sortby, not ascend, tests * fix my goof ups Co-authored-by: james <[email protected]>
880
0
85,785
14
9
26
def load_file(file_location=None):
    if str(file_location).endswith(".ini"):
        params = configparser.RawConfigParser()
        params.read(file_location)
        params.optionxform = str  # type: ignore
        params = params["OPENBB"]

        if "technique" in params:
            current_model = params["technique"]
        else:
            current_model = None
    elif str(file_location).endswith(".xlsx"):
        params, _ = excel_model.load_configuration(file_location)
        current_model = params["technique"]
    else:
        console.print(
            "Can not load in the file due to not being an .ini or .xlsx file."
        )
        return None, None

    max_len = max(len(k) for k in params.keys())
    help_text = "[info]Parameters:[/info]\n"
    if current_model:
        for k, v in params.items():
            all_params = DEFAULT_PARAMETERS + MODEL_PARAMS[current_model]
            if k in all_params:
                help_text += f" [param]{k}{' ' * (max_len - len(k))} :[/param] {v}\n"
    else:
        for k, v in params.items():
            help_text += f" [param]{k}{' ' * (max_len - len(k))} :[/param] {v}\n"

    console.print(help_text)

    return params, current_model
openbb_terminal/portfolio/portfolio_optimization/parameters/params_view.py
363
OpenBBTerminal
{ "docstring": "\n Loads in the configuration file and return the parameters in a dictionary including the model if available.\n\n Parameters\n ----------\n file_location: str\n The location of the file to be loaded in either xlsx or ini.\n\n Returns\n -------\n Return the parameters and the model, if available.\n ", "language": "en", "n_whitespaces": 76, "n_words": 44, "vocab_size": 32 }
116
Python
70
34bc290dded1bd2418fc3c6b375a79f9cdd68d5a
params_view.py
284,352
30
176
load_file
https://github.com/OpenBB-finance/OpenBBTerminal.git
New portfolio optimization menu (#1642) * New-Portfolio-Optimization-Menu * New-Portfolio-Optimization-Menu * New-Portfolio-Optimization-Menu * Update _index.md * New-Portfolio-Optimization-Menu * New-Portfolio-Optimization-Menu * configure portfolio optimization parameters ini * minor improvement * Revert "New-Portfolio-Optimization-Menu" This reverts commit b4b7169cfbc8f28c379eb1920307c2cdd2e47a0f. * Add in Excel functionality and improve the capabilities * Add Excel load function * Tidying up the functions * New-Portfolio-Optimization-Menu * New-Portfolio-Optimization-Menu * New-Portfolio-Optimization-Menu * Re-add my code * Some spacing and details * Add folder structure for portfolio * Update terminal file loading * New-Portfolio-Optimization-Menu * Make it possible to move from params to po with loaded file * New-Portfolio-Optimization-Menu * New-Portfolio-Optimization-Menu * Making the connection between the parameters file and the functions * Add in allocation and new params files * Improve params default settings * New-Portfolio-Optimization-Menu * Update Portfolios and Params sheets * Update sheets * Update command to load in correct sheet * Adjust function to only read specific columns * Update portfolio * Small correction * New-Portfolio-Optimization-Menu * New-Portfolio-Optimization-Menu * New-Portfolio-Optimization-Menu * Patched up show error * Add Equity portfolio * Make functions more robust * New-Portfolio-Optimization-Menu * New-Portfolio-Optimization-Menu * Add in Params documentation * Fixing Linting * Add in Requirements and Poetry Updates * Update website * linting * Update tests * Minor fix * remove unneccesary READMEs * Remove expected variable type * Improve documentation * Clean up the code * Refractoring * Adjust names to make it OS friendly Co-authored-by: Jeroen Bouma <[email protected]> Co-authored-by: jmaslek <[email protected]> Co-authored-by: Colin Delahunty <[email protected]> Co-authored-by: DidierRLopes <[email protected]>
325
0
84,704
19
1
2
def showticklabels(self):
    return self["showticklabels"]
packages/python/plotly/plotly/graph_objs/bar/marker/_colorbar.py
22
plotly.py
{ "docstring": "\n Determines whether or not the tick labels are drawn.\n\n The 'showticklabels' property must be specified as a bool\n (either True, or False)\n\n Returns\n -------\n bool\n ", "language": "en", "n_whitespaces": 75, "n_words": 25, "vocab_size": 23 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_colorbar.py
228,743
2
11
showticklabels
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
60,416
7
1
3
def get_trial_id() -> str:
    return _trial_id
nni/trial.py
18
nni
{ "docstring": "\n Return unique ID of the trial that is current running.\n\n This is shown as \"ID\" in the web portal's trial table.\n ", "language": "en", "n_whitespaces": 31, "n_words": 21, "vocab_size": 18 }
6
Python
6
553e91f4205b286ce7d71142f517c010bbcefac7
trial.py
112,010
7
9
get_trial_id
https://github.com/microsoft/nni.git
Update trial and experiment docstr (#4672)
12
0
24,548
6
35
61
def make_stock_entry(**args):
    def process_serial_numbers(serial_nos_list):
        serial_nos_list = [
            "\n".join(serial_num["serial_no"] for serial_num in serial_nos_list if serial_num.serial_no)
        ]
        uniques = list(set(serial_nos_list[0].split("\n")))
        return "\n".join(uniques)

    s = frappe.new_doc("Stock Entry")
    args = frappe._dict(args)

    if args.posting_date or args.posting_time:
        s.set_posting_time = 1

    if args.posting_date:
        s.posting_date = args.posting_date
    if args.posting_time:
        s.posting_time = args.posting_time
    if args.inspection_required:
        s.inspection_required = args.inspection_required

    # map names
    if args.from_warehouse:
        args.source = args.from_warehouse
    if args.to_warehouse:
        args.target = args.to_warehouse
    if args.item_code:
        args.item = args.item_code
    if args.apply_putaway_rule:
        s.apply_putaway_rule = args.apply_putaway_rule

    if isinstance(args.qty, str):
        if "." in args.qty:
            args.qty = flt(args.qty)
        else:
            args.qty = cint(args.qty)

    # purpose
    if not args.purpose:
        if args.source and args.target:
            s.purpose = "Material Transfer"
        elif args.source:
            s.purpose = "Material Issue"
        else:
            s.purpose = "Material Receipt"
    else:
        s.purpose = args.purpose

    # company
    if not args.company:
        if args.source:
            args.company = frappe.db.get_value("Warehouse", args.source, "company")
        elif args.target:
            args.company = frappe.db.get_value("Warehouse", args.target, "company")

    # set vales from test
    if frappe.flags.in_test:
        if not args.company:
            args.company = "_Test Company"
        if not args.item:
            args.item = "_Test Item"

    s.company = args.company or erpnext.get_default_company()
    s.purchase_receipt_no = args.purchase_receipt_no
    s.delivery_note_no = args.delivery_note_no
    s.sales_invoice_no = args.sales_invoice_no
    s.is_opening = args.is_opening or "No"

    if not args.cost_center:
        args.cost_center = frappe.get_value("Company", s.company, "cost_center")

    if not args.expense_account and s.is_opening == "No":
        args.expense_account = frappe.get_value("Company", s.company, "stock_adjustment_account")

    # We can find out the serial number using the batch source document
    serial_number = args.serial_no

    if not args.serial_no and args.qty and args.batch_no:
        serial_number_list = frappe.get_list(
            doctype="Stock Ledger Entry",
            fields=["serial_no"],
            filters={"batch_no": args.batch_no, "warehouse": args.from_warehouse},
        )
        serial_number = process_serial_numbers(serial_number_list)

    args.serial_no = serial_number

    s.append(
        "items",
        {
            "item_code": args.item,
            "s_warehouse": args.source,
            "t_warehouse": args.target,
            "qty": args.qty,
            "basic_rate": args.rate or args.basic_rate,
            "conversion_factor": args.conversion_factor or 1.0,
            "transfer_qty": flt(args.qty) * (flt(args.conversion_factor) or 1.0),
            "serial_no": args.serial_no,
            "batch_no": args.batch_no,
            "cost_center": args.cost_center,
            "expense_account": args.expense_account,
        },
    )

    s.set_stock_entry_type()
    if not args.do_not_save:
        s.insert()
        if not args.do_not_submit:
            s.submit()
    return s
erpnext/stock/doctype/stock_entry/stock_entry_utils.py
1,040
erpnext
{ "docstring": "Helper function to make a Stock Entry\n\n\t:item_code: Item to be moved\n\t:qty: Qty to be moved\n\t:company: Company Name (optional)\n\t:from_warehouse: Optional\n\t:to_warehouse: Optional\n\t:rate: Optional\n\t:serial_no: Optional\n\t:batch_no: Optional\n\t:posting_date: Optional\n\t:posting_time: Optional\n\t:purpose: Optional\n\t:do_not_save: Optional flag\n\t:do_not_submit: Optional flag\n\t", "language": "en", "n_whitespaces": 29, "n_words": 43, "vocab_size": 29 }
280
Python
164
494bd9ef78313436f0424b918f200dab8fc7c20b
stock_entry_utils.py
67,763
84
574
make_stock_entry
https://github.com/frappe/erpnext.git
style: format code with black
186
0
14,615
15
3
11
def get_one_match(expr, lines):
    # member names in the ld_headers output are between square brackets
    expr = rf'\[({expr})\]'
    matches = list(filter(None, (re.search(expr, line) for line in lines)))
    if len(matches) == 1:
        return matches[0].group(1)
    else:
        return None

# additional processing to deal with AIX legacy names for 64-bit members
python3.10.4/Lib/ctypes/_aix.py
98
XX-Net
{ "docstring": "\n Must be only one match, otherwise result is None.\n When there is a match, strip leading \"[\" and trailing \"]\"\n ", "language": "en", "n_whitespaces": 30, "n_words": 20, "vocab_size": 18 }
47
Python
41
8198943edd73a363c266633e1aa5b2a9e9c9f526
_aix.py
221,800
7
58
get_one_match
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
78
0
56,517
13
1
11
def test_config_validation_for_choices_workaround(business_client, project_id):
    payload = {
        'label_config': '<View><Text name="artist" /><View><Choices name="choices_1" toName="artist"><Choice name="choice_1" value="1"/></Choices></View><View><Choices name="choices_2" toName="artist"><Choice name="choice_2" value="2"/></Choices></View></View>'}
    response = business_client.patch(
        f"/api/projects/{project_id}",
        data=json.dumps(payload),
        content_type="application/json",
    )
    assert response.status_code == 200

    payload = {
        'label_config': '<View><Text name="artist" /><View><Choices name="choices_1" toName="artist"><Choice name="choice_1" value="1"/></Choices><Choices name="choices_2" toName="artist"><Choice name="choice_2" value="2"/></Choices></View></View>'}
    response = business_client.patch(
        f"/api/projects/{project_id}",
        data=json.dumps(payload),
        content_type="application/json",
    )
    assert response.status_code == 200
label_studio/tests/test_config_validation.py
143
label-studio
{ "docstring": "\n Validate Choices tag for 1 choice with workaround\n Example bug DEV-3635\n ", "language": "en", "n_whitespaces": 21, "n_words": 11, "vocab_size": 11 }
55
Python
28
e3c87a8a709006f9064b8c32782fbd5461bd0d1d
test_config_validation.py
178,285
17
80
test_config_validation_for_choices_workaround
https://github.com/heartexlabs/label-studio.git
fix: DEV-4035: Fix single choice workaround for several choices tags (#3319) * fix: DEV-3635: Fix single choice workaround for several choices tags
138
0
42,647
11
3
23
def get_count(self, dttm_filter, session, states) -> int:
    TI = TaskInstance
    DR = DagRun
    if not dttm_filter:
        return 0

    if self.external_task_ids:
        count = (
            session.query(func.count())  # .count() is inefficient
            .filter(
                TI.dag_id == self.external_dag_id,
                TI.task_id.in_(self.external_task_ids),
                TI.state.in_(states),
                TI.execution_date.in_(dttm_filter),
            )
            .scalar()
        )
        count = count / len(self.external_task_ids)
    else:
        count = (
            session.query(func.count())
            .filter(
                DR.dag_id == self.external_dag_id,
                DR.state.in_(states),
                DR.execution_date.in_(dttm_filter),
            )
            .scalar()
        )
    return count
airflow/sensors/external_task.py
238
airflow
{ "docstring": "\n Get the count of records against dttm filter and states\n\n :param dttm_filter: date time filter for execution date\n :param session: airflow session object\n :param states: task or dag states\n :return: count of record against the filters\n ", "language": "en", "n_whitespaces": 79, "n_words": 36, "vocab_size": 27 }
59
Python
40
baf50cddd86ac07f064c8cbd95efb22d038b3832
external_task.py
44,412
36
152
get_count
https://github.com/apache/airflow.git
Fix tests for mssql after SQLA 1.4 upgrade (#21303) The way SQLA 1.4 constructed the query then `exeuction_date.in_([])` changed, and as a result it started failing. But we don't even need to ask the database in this case, as we know it won't return any rows.
428
0
8,251
18
1
27
async def test_boost_mode(hass, aioclient_mock, mock_deconz_websocket):
    data = {
        "sensors": {
            "0": {
                "config": {
                    "battery": 58,
                    "heatsetpoint": 2200,
                    "locked": False,
                    "mode": "heat",
                    "offset": -200,
                    "on": True,
                    "preset": "manual",
                    "reachable": True,
                    "schedule": {},
                    "schedule_on": False,
                    "setvalve": False,
                    "windowopen_set": False,
                },
                "ep": 1,
                "etag": "404c15db68c318ebe7832ce5aa3d1e30",
                "lastannounced": "2022-08-31T03:00:59Z",
                "lastseen": "2022-09-19T11:58Z",
                "manufacturername": "_TZE200_b6wax7g0",
                "modelid": "TS0601",
                "name": "Thermostat",
                "state": {
                    "lastupdated": "2022-09-19T11:58:24.204",
                    "lowbattery": False,
                    "on": False,
                    "temperature": 2200,
                    "valve": 0,
                },
                "type": "ZHAThermostat",
                "uniqueid": "84:fd:27:ff:fe:8a:eb:89-01-0201",
            }
        }
    }
    with patch.dict(DECONZ_WEB_REQUEST, data):
        config_entry = await setup_deconz_integration(hass, aioclient_mock)

    assert len(hass.states.async_all()) == 3
    climate_thermostat = hass.states.get("climate.thermostat")
    assert climate_thermostat.state == HVACMode.HEAT
    assert climate_thermostat.attributes["preset_mode"] is DECONZ_PRESET_MANUAL
    assert climate_thermostat.attributes["hvac_action"] is HVACAction.IDLE

    # Event signals thermostat preset boost and valve 100 (real data)
    event_changed_sensor = {
        "t": "event",
        "e": "changed",
        "r": "sensors",
        "id": "0",
        "config": {"preset": "boost"},
        "state": {"valve": 100},
    }
    await mock_deconz_websocket(data=event_changed_sensor)
    await hass.async_block_till_done()

    climate_thermostat = hass.states.get("climate.thermostat")
    assert climate_thermostat.attributes["preset_mode"] is PRESET_BOOST
    assert climate_thermostat.attributes["hvac_action"] is HVACAction.HEATING

    # Verify service calls
    mock_deconz_put_request(aioclient_mock, config_entry.data, "/sensors/0/config")
tests/components/deconz/test_climate.py
549
core
{ "docstring": "Test that a climate device with boost mode and different state works.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
151
Python
112
7a6897c7578dffd6b67f57747ebd81b67b153e01
test_climate.py
308,133
58
297
test_boost_mode
https://github.com/home-assistant/core.git
Add deconz current hvac operation to thermostate based on "state" (#59989) * deconz - add current hvac operation to thermostate based on "state" * deconz - extend current hvac operation to thermostate based on "state" and "mode" * Add tests for current hvac action * Add boost mode as special case * format using Black * sort imports * Add test for device with mode none and state none * Update homeassistant/components/deconz/climate.py Co-authored-by: Robert Svensson <[email protected]> * Fix test_climate.py test_no_mode_no_state * Add test for boost mode Co-authored-by: Robert Svensson <[email protected]>
811
0
106,894
15
1
9
def exp_var(self):
    var1 = self.y_pred_true.selectExpr("variance(label - prediction)").collect()[0][
        0
    ]
    var2 = self.y_pred_true.selectExpr("variance(label)").collect()[0][0]
    # numpy divide is more tolerant to var2 being zero
    return 1 - np.divide(var1, var2)
recommenders/evaluation/spark_evaluation.py
96
recommenders
{ "docstring": "Calculate explained variance.\n\n .. note::\n Spark MLLib's implementation is buggy (can lead to values > 1), hence we use var().\n\n Returns:\n float: Explained variance (min=0, max=1).\n ", "language": "en", "n_whitespaces": 68, "n_words": 26, "vocab_size": 26 }
27
Python
24
4637482026d2afc5dd93e1fdce6a3c9285427062
spark_evaluation.py
39,311
6
57
exp_var
https://github.com/microsoft/recommenders.git
use numpy divide in explained variance
80
0
7,208
13
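The same explained-variance formula computed locally with NumPy rather than Spark SQL; Spark's `variance` is the sample variance, hence `ddof=1` here:

import numpy as np

# explained variance = 1 - Var(label - prediction) / Var(label)
label = np.array([3.0, -0.5, 2.0, 7.0])
prediction = np.array([2.5, 0.0, 2.0, 8.0])

var_residual = np.var(label - prediction, ddof=1)
var_label = np.var(label, ddof=1)
print(1 - np.divide(var_residual, var_label))  # ~0.957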
1
11
def center_to_corners_format(x):
    x_center, y_center, width, height = x.unbind(-1)
    boxes = [(x_center - 0.5 * width), (y_center - 0.5 * height),
             (x_center + 0.5 * width), (y_center + 0.5 * height)]
    return torch.stack(boxes, dim=-1)
src/transformers/models/owlvit/feature_extraction_owlvit.py
103
transformers
{ "docstring": "\n Converts a PyTorch tensor of bounding boxes of center format (center_x, center_y, width, height) to corners format\n (left, top, right, bottom).\n ", "language": "en", "n_whitespaces": 31, "n_words": 21, "vocab_size": 19 }
33
Python
22
12d66b47012c9258f9557e6d3a0c13bcd1c72871
feature_extraction_owlvit.py
32,353
4
76
center_to_corners_format
https://github.com/huggingface/transformers.git
Add OWL-ViT model for zero-shot object detection (#17938) * add owlvit model skeleton * add class and box predictor heads * convert modified flax clip to pytorch * fix box and class predictors * add OwlViTImageTextEmbedder * convert class and box head checkpoints * convert image text embedder checkpoints * add object detection head * fix bugs * update conversion script * update conversion script * fix q,v,k,out weight conversion conversion * add owlvit object detection output * fix bug in image embedder * fix bugs in text embedder * fix positional embeddings * fix bug in inference mode vision pooling * update docs, init tokenizer and processor files * support batch processing * add OwlViTProcessor * remove merge conflicts * readd owlvit imports * fix bug in OwlViTProcessor imports * fix bugs in processor * update docs * fix bugs in processor * update owlvit docs * add OwlViTFeatureExtractor * style changes, add postprocess method to feature extractor * add feature extractor and processor tests * add object detection tests * update conversion script * update config paths * update config paths * fix configuration paths and bugs * fix bugs in OwlViT tests * add import checks to processor * fix docs and minor issues * fix docs and minor issues * fix bugs and issues * fix bugs and issues * fix bugs and issues * fix bugs and issues * update docs and examples * fix bugs and issues * update conversion script, fix positional embeddings * process 2D input ids, update tests * fix style and quality issues * update docs * update docs and imports * update OWL-ViT index.md * fix bug in OwlViT feature ext tests * fix code examples, return_dict by default * return_dict by default * minor fixes, add tests to processor * small fixes * add output_attentions arg to main model * fix bugs * remove output_hidden_states arg from main model * update self.config variables * add option to return last_hidden_states * fix bug in config variables * fix copied from statements * fix small issues and bugs * fix bugs * fix bugs, support greyscale images * run fixup * update repo name * merge OwlViTImageTextEmbedder with obj detection head * fix merge conflict * fix merge conflict * make fixup * fix bugs * fix bugs * add additional processor test
45
0
5,912
10
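A quick numeric check of the conversion above: a 100x100 box centered at (50, 40) becomes corners (0, -10, 100, 90):

import torch

boxes_center = torch.tensor([[50.0, 40.0, 100.0, 100.0]])
x_c, y_c, w, h = boxes_center.unbind(-1)
corners = torch.stack(
    [x_c - 0.5 * w, y_c - 0.5 * h, x_c + 0.5 * w, y_c + 0.5 * h], dim=-1
)
print(corners)  # tensor([[  0., -10., 100.,  90.]])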
5
32
def _sort_key(self, candidate):
    # type: (InstallationCandidate) -> CandidateSortingKey
    valid_tags = self._supported_tags
    support_num = len(valid_tags)
    build_tag = ()  # type: BuildTag
    binary_preference = 0
    link = candidate.link
    if link.is_wheel:
        # can raise InvalidWheelFilename
        wheel = Wheel(link.filename)
        try:
            pri = -(wheel.find_most_preferred_tag(
                valid_tags, self._wheel_tag_preferences
            ))
        except ValueError:
            raise UnsupportedWheel(
                "{} is not a supported wheel for this platform. It "
                "can't be sorted.".format(wheel.filename)
            )
        if self._prefer_binary:
            binary_preference = 1
        if wheel.build_tag is not None:
            match = re.match(r'^(\d+)(.*)$', wheel.build_tag)
            build_tag_groups = match.groups()
            build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
    else:  # sdist
        pri = -(support_num)
    has_allowed_hash = int(link.is_hash_allowed(self._hashes))
    yank_value = -1 * int(link.is_yanked)  # -1 for yanked.
    return (
        has_allowed_hash,
        yank_value,
        binary_preference,
        candidate.version,
        pri,
        build_tag,
    )
.venv/lib/python3.8/site-packages/pip/_internal/index/package_finder.py
289
transferlearning
{ "docstring": "\n Function to pass as the `key` argument to a call to sorted() to sort\n InstallationCandidates by preference.\n\n Returns a tuple such that tuples sorting as greater using Python's\n default comparison operator are more preferred.\n\n The preference is as follows:\n\n First and foremost, candidates with allowed (matching) hashes are\n always preferred over candidates without matching hashes. This is\n because e.g. if the only candidate with an allowed hash is yanked,\n we still want to use that candidate.\n\n Second, excepting hash considerations, candidates that have been\n yanked (in the sense of PEP 592) are always less preferred than\n candidates that haven't been yanked. Then:\n\n If not finding wheels, they are sorted by version only.\n If finding wheels, then the sort order is by version, then:\n 1. existing installs\n 2. wheels ordered via Wheel.support_index_min(self._supported_tags)\n 3. source archives\n If prefer_binary was set, then all wheels are sorted above sources.\n\n Note: it was considered to embed this logic into the Link\n comparison operators, but then different sdist links\n with the same version, would have to be considered equal\n ", "language": "en", "n_whitespaces": 346, "n_words": 173, "vocab_size": 123 }
109
Python
79
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
package_finder.py
60,747
31
178
_sort_key
https://github.com/jindongwang/transferlearning.git
upd; format
479
0
12,274
16
1
4
def extract(self) -> str:
    return self._src.extract()
pipenv/vendor/tomlkit/parser.py
29
pipenv
{ "docstring": "\n Extracts the value between marker and index\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
6
Python
6
8faa74cdc9da20cfdcc69f5ec29b91112c95b4c9
parser.py
21,815
5
16
extract
https://github.com/pypa/pipenv.git
Update tomlkit==0.9.2 Used: python -m invoke vendoring.update --package=tomlkit
20
0
4,057
8
1
10
def cumsum(x, axis=0):
    return tf.cumsum(x, axis=axis)


@keras_export("keras.backend.cumprod")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
keras/backend.py
63
@keras_export("keras.backend.cumprod") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
keras
{ "docstring": "Cumulative sum of the values in a tensor, alongside the specified axis.\n\n Args:\n x: A tensor or variable.\n axis: An integer, the axis to compute the sum.\n\n Returns:\n A tensor of the cumulative sum of values of `x` along `axis`.\n ", "language": "en", "n_whitespaces": 70, "n_words": 40, "vocab_size": 29 }
9
Python
9
84afc5193d38057e2e2badf9c889ea87d80d8fbf
backend.py
269,599
2
21
cumsum
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
12
1
80,220
8
3
11
def is_done(self, best_sum_logprobs, cur_len):
    if len(self) < self.num_beams:
        return False
    elif self.early_stopping:
        return True
    else:
        cur_score = best_sum_logprobs / cur_len**self.length_penalty
        ret = self.worst_score >= cur_score
        return ret
src/transformers/generation_tf_utils.py
79
transformers
{ "docstring": "\n If there are enough hypotheses and that none of the hypotheses being generated can become better than the worst\n one in the heap, then we are done with this sentence.\n ", "language": "en", "n_whitespaces": 52, "n_words": 30, "vocab_size": 26 }
27
Python
22
7732d0fe7a759c9844215920e9f1c5540eafb1a6
generation_tf_utils.py
35,036
9
49
is_done
https://github.com/huggingface/transformers.git
Upgrade black to version ~=22.0 (#15565) * Upgrade black to version ~=22.0 * Check copies * Fix code
110
0
6,381
12
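A worked example of the early-stopping score above: with length penalty 1.0, a best attainable sum of log-probs of -4.0 over 8 tokens normalizes to -0.5, so a kept worst score of -0.45 means the beam can stop:

# Score = sum_logprobs / len**length_penalty; the hypothesis set is "done"
# once the best possible remaining score cannot beat the worst kept one.
best_sum_logprobs = -4.0
cur_len = 8
length_penalty = 1.0

cur_score = best_sum_logprobs / cur_len**length_penalty  # -0.5
worst_score = -0.45
print(worst_score >= cur_score)  # True -> generation can stop for this beam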
1
13
def test_dataset_shard_with_only_local(self):
    config = {
        "input": "dataset",
        "input_config": {"format": "json", "paths": self.dset_path},
    }

    # two ways of doing this:

    # we have no remote workers
    _, shards = get_dataset_and_shards(config, num_workers=0)

    assert len(shards) == 1
    assert isinstance(shards[0], ray.data.Dataset)
rllib/offline/tests/test_dataset_reader.py
107
ray
{ "docstring": "Tests whether the dataset_shard function works correctly for a single shard\n for the local worker.", "language": "en", "n_whitespaces": 21, "n_words": 15, "vocab_size": 13 }
37
Python
34
569fe0109629048d08e1d9e023f7769f10bd2244
test_dataset_reader.py
125,001
8
61
test_dataset_shard_with_only_local
https://github.com/ray-project/ray.git
[RLlib] improved unittests for dataset_reader and fixed bugs (#26458)
115
0
27,741
11
1
4
def for_all_test_methods(decorator, *args, **kwargs):
keras/testing_infra/test_utils.py
21
keras
{ "docstring": "Generate class-level decorator from given method-level decorator.\n\n It is expected for the given decorator to take some arguments and return\n a method that is then called on the test method to produce a decorated\n method.\n\n Args:\n decorator: The decorator to apply.\n *args: Positional arguments\n **kwargs: Keyword arguments\n Returns: Function that will decorate a given classes test methods with the\n decorator.\n ", "language": "en", "n_whitespaces": 98, "n_words": 60, "vocab_size": 43 }
4
Python
4
84afc5193d38057e2e2badf9c889ea87d80d8fbf
test_utils.py
276,383
3
16
for_all_test_methods
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
7
0
81,645
6
1
10
def get_theme_dir(name):
    theme = get_themes()[name]
    return os.path.dirname(os.path.abspath(theme.load().__file__))
mkdocs/utils/__init__.py
61
mkdocs
{ "docstring": "Return the directory of an installed theme by name.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
7
Python
7
e7f07cc82ab2be920ab426ba07456d8b2592714d
__init__.py
224,062
3
36
get_theme_dir
https://github.com/mkdocs/mkdocs.git
Remove spaces at the ends of docstrings, normalize quotes
16
0
57,209
12
1
16
def test_cache() -> None:
    ledger_store = DictLedgerStore()
    user_key = b"1322"
    ledger = DataSubjectLedger.get_or_create(store=ledger_store, user_key=user_key)

    assert (
        ledger._cache_constant2epsilon[0] == 0.05372712063485988
    ), "The first value in the cache is incorrect"
    assert (
        ledger._cache_constant2epsilon[1] == 0.07773597369831031
    ), "Has the DP cache been changed?"

    rdp_700k = convert_constants_to_indices(np.array([700_000]))
    assert (
        ledger._cache_constant2epsilon.take(rdp_700k)[0] == 706213.1816144075
    ), "Has the DP cache been changed?"
    rdp_50 = convert_constants_to_indices(np.array([50]))
    assert (
        ledger._cache_constant2epsilon.take(rdp_50)[0] == 100.68990516105825
    ), "Has the DP cache been changed?"
    assert (
        len(ledger._cache_constant2epsilon) >= 1_200_000
    ), "Has the cache been changed?"
packages/syft/tests/syft/core/adp/data_subject_ledger_test.py
211
PySyft
{ "docstring": "Ensure the most up to date RDP-to-epsilon cache is being used.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
81
Python
43
61f4138eeb028287425f6007d692bf7faa808e75
data_subject_ledger_test.py
3,245
22
139
test_cache
https://github.com/OpenMined/PySyft.git
Add tests for ledger and cache
164
0
413
11
4
17
def _apply_scores(self, scores, value, scores_mask=None, training=None):
    if scores_mask is not None:
        padding_mask = tf.logical_not(scores_mask)
        # Bias so padding positions do not contribute to attention distribution.
        # Note 65504. is the max float16 value.
        if scores.dtype is tf.float16:
            scores -= 65504.0 * tf.cast(padding_mask, dtype=scores.dtype)
        else:
            scores -= 1.0e9 * tf.cast(padding_mask, dtype=scores.dtype)
    if training is None:
        training = backend.learning_phase()
    weights = tf.nn.softmax(scores)
keras/layers/attention/base_dense_attention.py
153
keras
{ "docstring": "Applies attention scores to the given value tensor.\n\n To use this method in your attention layer, follow the steps:\n\n * Use `query` tensor of shape `[batch_size, Tq]` and `key` tensor of shape\n `[batch_size, Tv]` to calculate the attention `scores`.\n * Pass `scores` and `value` tensors to this method. The method applies\n `scores_mask`, calculates `attention_distribution = softmax(scores)`, then\n returns `matmul(attention_distribution, value).\n * Apply `query_mask` and return the result.\n\n Args:\n scores: Scores float tensor of shape `[batch_size, Tq, Tv]`.\n value: Value tensor of shape `[batch_size, Tv, dim]`.\n scores_mask: A boolean mask `Tensor` of shape `[batch_size, 1, Tv]` or\n `[batch_size, Tq, Tv]`. If given, scores at positions where\n `scores_mask==False` do not contribute to the result. It must contain\n at least one `True` value in each line along the last dimension.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (no dropout).\n\n Returns:\n Tensor of shape `[batch_size, Tq, dim]`.\n Attention scores after masking and softmax with shape\n `[batch_size, Tq, Tv]`.\n ", "language": "en", "n_whitespaces": 350, "n_words": 165, "vocab_size": 108 }
60
Python
44
84afc5193d38057e2e2badf9c889ea87d80d8fbf
base_dense_attention.py
272,375
15
133
_apply_scores
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
184
0
81,002
16
1
3
def drain_call_queue(self, num_splits=None):
modin/core/execution/ray/implementations/pandas_on_ray/partitioning/virtual_partition.py
18
modin
{ "docstring": "\n Execute all operations stored in this partition's call queue.\n\n Parameters\n ----------\n num_splits : int, default: None\n The number of times to split the result object.\n ", "language": "en", "n_whitespaces": 72, "n_words": 25, "vocab_size": 25 }
3
Python
3
3d4404e9d9a9b2a3327f8aee664a8e71ac1f18b8
virtual_partition.py
153,772
7
40
drain_call_queue
https://github.com/modin-project/modin.git
FEAT-#4412: Add Batch Pipeline API to Modin (#4452) Co-authored-by: Yaroslav Igoshev <[email protected]> Co-authored-by: Mahesh Vashishtha <[email protected]> Signed-off-by: Rehan Durrani <[email protected]>
10
0
35,596
6
3
14
def has_completed_sso(request, organization_id) -> bool:
    sso_session_in_request = request.session.get(
        SsoSession.django_session_key(organization_id), None
    )

    if not sso_session_in_request:
        metrics.incr("sso.no-value-in-session")
        return False

    django_session_value = SsoSession.from_django_session_value(
        organization_id, sso_session_in_request
    )

    if not django_session_value.is_sso_authtime_fresh():
        metrics.incr("sso.session-timed-out")
        return False

    metrics.incr("sso.session-verify-success")

    return True
src/sentry/utils/auth.py
125
sentry
{ "docstring": "\n look for the org id under the sso session key, and check that the timestamp isn't past our expiry limit\n ", "language": "en", "n_whitespaces": 27, "n_words": 20, "vocab_size": 18 }
32
Python
24
2bad4600970d40bc799143571ab708a19e9774d1
auth.py
95,876
18
73
has_completed_sso
https://github.com/getsentry/sentry.git
chore(auth): remove deprecated SSO key check (#30889) * remove deprecated sso values * clean up checking logic * update metric name
101
0
19,252
10
7
14
def process_arguments(self):
    args = [arg for arg in sys.argv]  # pylint:disable=unnecessary-comprehension
    if self.updater:
        from lib.utils import get_backend  # pylint:disable=import-outside-toplevel
        args.append(f"--{get_backend()}")

    for arg in args:
        if arg == "--installer":
            self.is_installer = True
        if arg == "--nvidia":
            self.enable_cuda = True
        if arg == "--amd":
            self.enable_amd = True
setup.py
129
faceswap
{ "docstring": " Process any cli arguments and dummy in cli arguments if calling from updater. ", "language": "en", "n_whitespaces": 14, "n_words": 13, "vocab_size": 11 }
45
Python
28
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
setup.py
100,422
12
70
process_arguments
https://github.com/deepfakes/faceswap.git
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
175
0
19,905
13
3
29
def _load_icons():
    size = get_config().user_config_dict.get("icon_size", 16)
    size = int(round(size * get_config().scaling_factor))
    icons = {}
    pathicons = os.path.join(PATHCACHE, "icons")
    for fname in os.listdir(pathicons):
        name, ext = os.path.splitext(fname)
        if ext != ".png":
            continue
        img = Image.open(os.path.join(pathicons, fname))
        img = ImageTk.PhotoImage(img.resize((size, size), resample=Image.HAMMING))
        icons[name] = img
    logger.debug(icons)
    return icons
lib/gui/utils.py
216
faceswap
{ "docstring": " Scan the icons cache folder and load the icons into :attr:`icons` for retrieval\n throughout the GUI.\n\n Returns\n -------\n dict:\n The icons formatted as described in :attr:`icons`\n\n ", "language": "en", "n_whitespaces": 73, "n_words": 26, "vocab_size": 21 }
46
Python
34
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
utils.py
100,336
14
132
_load_icons
https://github.com/deepfakes/faceswap.git
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
172
0
19,832
14
2
10
def run_coroutine_threadsafe(coro, loop):
    if not coroutines.iscoroutine(coro):
        raise TypeError('A coroutine object is required')
    future = concurrent.futures.Future()
python3.10.4/Lib/asyncio/tasks.py
55
XX-Net
{ "docstring": "Submit a coroutine object to a given event loop.\n\n Return a concurrent.futures.Future to access the result.\n ", "language": "en", "n_whitespaces": 22, "n_words": 16, "vocab_size": 13 }
15
Python
15
8198943edd73a363c266633e1aa5b2a9e9c9f526
tasks.py
220,805
7
41
run_coroutine_threadsafe
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
31
0
56,124
10
4
32
def find_airflow_sources_root_to_operate_on() -> Path:
    installation_airflow_sources = get_installation_airflow_sources()
    if installation_airflow_sources is None and not skip_upgrade_check():
        console.print(
            "\n[red]Breeze should only be installed with -e flag[/]\n\n"
            "[bright_yellow]Please go to Airflow sources and run[/]\n\n"
            f" {NAME} self-upgrade --force\n"
        )
        sys.exit(1)
    airflow_sources = get_used_airflow_sources()
    if not skip_upgrade_check():
        # only print warning and sleep if not producing complete results
        print_warning_if_different_sources(airflow_sources)
        print_warning_if_setup_changed()
    console.print(f"[bright_blue]Airflow sources: {airflow_sources}[/]")
    os.chdir(str(airflow_sources))
    return airflow_sources


AIRFLOW_SOURCES_ROOT = find_airflow_sources_root_to_operate_on()
BUILD_CACHE_DIR = AIRFLOW_SOURCES_ROOT / '.build'
FILES_DIR = AIRFLOW_SOURCES_ROOT / 'files'
MSSQL_DATA_VOLUME = AIRFLOW_SOURCES_ROOT / 'tmp_mssql_volume'
MYPY_CACHE_DIR = AIRFLOW_SOURCES_ROOT / '.mypy_cache'
LOGS_DIR = AIRFLOW_SOURCES_ROOT / 'logs'
DIST_DIR = AIRFLOW_SOURCES_ROOT / 'dist'
SCRIPTS_CI_DIR = AIRFLOW_SOURCES_ROOT / 'scripts' / 'ci'
DOCKER_CONTEXT_DIR = AIRFLOW_SOURCES_ROOT / 'docker-context-files'
CACHE_TMP_FILE_DIR = tempfile.TemporaryDirectory()
OUTPUT_LOG = Path(CACHE_TMP_FILE_DIR.name, 'out.log')
BREEZE_SOURCES_ROOT = AIRFLOW_SOURCES_ROOT / "dev" / "breeze"
dev/breeze/src/airflow_breeze/utils/path_utils.py
286
airflow
{ "docstring": "\n Find the root of airflow sources we operate on. Handle the case when Breeze is installed via `pipx` from\n a different source tree, so it searches upwards of the current directory to find the right root of\n airflow directory we are actually in. This **might** be different than the sources of Airflow Breeze\n was installed from.\n\n If not found, we operate on Airflow sources that we were installed it. This handles the case when\n we run Breeze from a \"random\" directory.\n\n This method also handles the following errors and warnings:\n\n * It fails (and exits hard) if Breeze is installed in non-editable mode (in which case it will\n not find the Airflow sources when walking upwards the directory where it is installed)\n * It warns (with 2 seconds timeout) if you are using Breeze from a different airflow sources than\n the one you operate on.\n * If we are running in the same source tree as where Breeze was installed from (so no warning above),\n it warns (with 2 seconds timeout) if there is a change in setup.* files of Breeze since installation\n time. In such case usesr is encouraged to re-install Breeze to update dependencies.\n\n :return: Path for the found sources.\n\n ", "language": "en", "n_whitespaces": 280, "n_words": 202, "vocab_size": 109 }
120
Python
77
bca849b4586c7446438f959b62903da4b997b9ea
path_utils.py
46,871
38
73
find_airflow_sources_root_to_operate_on
https://github.com/apache/airflow.git
Switch to `pipx` as the only installation Breeze2 method (#22740) Switching Breeze2 to only use `pipx` for installation of Breeze2 due to problems it might cause for autocompletion if entrypoint is not avaiable on PATH.
211
0
9,029
12
5
4
def process_task(self, task):
    # processing tasks so that no arguments are included
    # unless it's a fromfile or jsonfile one
    if 'fromfile:' in task or 'jsonfile:' in task or 'internal:' in task:
        return None if self.ignore_task else task
    return task

##########################################
# generation setup-related class functions
##########################################
parlai/scripts/generate_model_card.py
60
ParlAI
{ "docstring": "\n tries to remap tasks to their external version, and then may ignore the tasks\n w/o ext.\n\n version depending on `ignore_task`\n ", "language": "en", "n_whitespaces": 49, "n_words": 20, "vocab_size": 18 }
47
Python
35
81f722d29045a7a5841d0931a082ded1d1f13863
generate_model_card.py
194,778
4
31
process_task
https://github.com/facebookresearch/ParlAI.git
autoformat (#4378)
102
0
47,071
9
2
5
def should_save(self):
    if self.save_on_each_node:
        return self.local_process_index == 0
    else:
        return self.process_index == 0
paddlenlp/trainer/trainer_args.py
43
PaddleNLP
{ "docstring": "\n Whether or not the current process should write to disk, e.g., to save models and checkpoints.\n ", "language": "en", "n_whitespaces": 31, "n_words": 16, "vocab_size": 15 }
13
Python
10
44a290e94d1becd1f09fddc3d873f9e19c9d6919
trainer_args.py
323,119
5
25
should_save
https://github.com/PaddlePaddle/PaddleNLP.git
[Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761) * add some datasets for finetune. * support fine tune for all tastks. * add trainer prototype. * init verison for paddlenlp trainer. * refine trainer. * update for some details. * support multi-cards training evaluation. * support load from ckpt. * support for export inference model. * first version of trainer. * seq cls support clue. * trainer support for token classification and question answersing tasks. * fix as reviews. Co-authored-by: Zeyu Chen <[email protected]>
56
0
118,366
10
1
6
def get_default_rl_module_class(self) -> Union[Type["RLModule"], str]:
    raise NotImplementedError
rllib/algorithms/algorithm_config.py
31
ray
{ "docstring": "Returns the RLModule class to use for this algorithm.\n\n Override this method in the sub-class to return the RLModule class type given\n the input framework.\n\n Returns:\n The RLModule class to use for this algorithm either as a class type or as\n a string (e.g. x.y.z).\n ", "language": "en", "n_whitespaces": 95, "n_words": 45, "vocab_size": 28 }
7
Python
7
f9ec2d1ae2e14e1f1ed38d315dfd643f600dc397
algorithm_config.py
137,403
11
18
get_default_rl_module_class
https://github.com/ray-project/ray.git
[RLlib] Make RLModule initialization easy (#31069) 1. Moved the `_enable_rl_module_api` signature into `rl_module()` api of the algorithmConfig. 2. Added the ability for the user to override the entire RLModule from algorithmConfig by simply changing the class. 3. updated marl_module: we now have only one MARLModule base-class that can be used stand-alone, users can override it completely if they want. 4. Removed test_torch_marl_module (Will add it back in a framework agnostic way) 5. Updated TorchMARL and RL modules to use the new constructor format. 6. Core tests now works independent of failures of PPORLModule. 7. Core tests is now based on factory methods of RLModule. 8. created a new isolated unittest for marl_module 9. update ppo torch RL module to adhere to the new API changes. 10. get_rl_module_class is now a instance method instead of classmethod 11. made enabling the api more explicit from algo_config() Signed-off-by: Kourosh Hakhamaneshi <[email protected]>
21
0
31,158
7
1
4
def host(self) -> str:
    return self.first_pod_args.host
jina/orchestrate/deployments/__init__.py
25
jina
{ "docstring": "Get the host name of this deployment\n\n\n .. # noqa: DAR201\n ", "language": "en", "n_whitespaces": 25, "n_words": 11, "vocab_size": 11 }
6
Python
6
13edc16d806fb5d77a6849551178ccc75937f25f
__init__.py
10,837
7
14
host
https://github.com/jina-ai/jina.git
refactor: rename pod to deployment (#4230) * refactor: rename pod to deployment * style: fix overload and cli autocomplete * fix: undo daemon mistake * refactor: leftover cleanup * fix: more test fixes * fix: more fixes * fix: more fixes * fix: more fixes * fix: more tests * fix: fix more tests * refactor: fix more tests * refactor: more tests fixes * refactor: rename pea to pod * refactor: adjust docs * refactor: complete pea renaming * refactor: more fixes * fix: pea_type in k8s yamls * fix: adjust pod args name * refactor: rename peapods parser folder * fix: da init Co-authored-by: Jina Dev Bot <[email protected]>
20
0
1,940
7
20
19
def select_traces(self, selector=None, row=None, col=None, secondary_y=None):
    if not selector and not isinstance(selector, int):
        selector = {}
    if row is not None or col is not None or secondary_y is not None:
        grid_ref = self._validate_get_grid_ref()
        filter_by_subplot = True

        if row is None and col is not None:
            # All rows for column
            grid_subplot_ref_tuples = [ref_row[col - 1] for ref_row in grid_ref]
        elif col is None and row is not None:
            # All columns for row
            grid_subplot_ref_tuples = grid_ref[row - 1]
        elif col is not None and row is not None:
            # Single grid cell
            grid_subplot_ref_tuples = [grid_ref[row - 1][col - 1]]
        else:
            # row and col are None, secondary_y not None
            grid_subplot_ref_tuples = [
                refs for refs_row in grid_ref for refs in refs_row
            ]

        # Collect list of subplot refs, taking secondary_y into account
        grid_subplot_refs = []
        for refs in grid_subplot_ref_tuples:
            if not refs:
                continue
            if secondary_y is not True:
                grid_subplot_refs.append(refs[0])
            if secondary_y is not False and len(refs) > 1:
                grid_subplot_refs.append(refs[1])
    else:
        filter_by_subplot = False
        grid_subplot_refs = None

    return self._perform_select_traces(
        filter_by_subplot, grid_subplot_refs, selector
    )
packages/python/plotly/plotly/basedatatypes.py
335
plotly.py
{ "docstring": "\n Select traces from a particular subplot cell and/or traces\n that satisfy custom selection criteria.\n\n Parameters\n ----------\n selector: dict, function, int, str or None (default None)\n Dict to use as selection criteria.\n Traces will be selected if they contain properties corresponding\n to all of the dictionary's keys, with values that exactly match\n the supplied values. If None (the default), all traces are\n selected. If a function, it must be a function accepting a single\n argument and returning a boolean. The function will be called on\n each trace and those for which the function returned True\n will be in the selection. If an int N, the Nth trace matching row\n and col will be selected (N can be negative). If a string S, the selector\n is equivalent to dict(type=S).\n row, col: int or None (default None)\n Subplot row and column index of traces to select.\n To select traces by row and column, the Figure must have been\n created using plotly.subplots.make_subplots. If None\n (the default), all traces are selected.\n secondary_y: boolean or None (default None)\n * If True, only select traces associated with the secondary\n y-axis of the subplot.\n * If False, only select traces associated with the primary\n y-axis of the subplot.\n * If None (the default), do not filter traces based on secondary\n y-axis.\n\n To select traces by secondary y-axis, the Figure must have been\n created using plotly.subplots.make_subplots. See the docstring\n for the specs argument to make_subplots for more info on\n creating subplots with secondary y-axes.\n Returns\n -------\n generator\n Generator that iterates through all of the traces that satisfy\n all of the specified selection criteria\n ", "language": "en", "n_whitespaces": 635, "n_words": 264, "vocab_size": 140 }
173
Python
78
321c77230a794d9f0595038a2674c955889ed0e3
basedatatypes.py
241,434
30
217
select_traces
https://github.com/plotly/plotly.py.git
Avoid selecting all traces on selector=0
614
0
69,523
15
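A usage sketch matching the docstring above (trace names are illustrative):

import plotly.graph_objects as go
from plotly.subplots import make_subplots

fig = make_subplots(rows=2, cols=1)
fig.add_trace(go.Scatter(y=[1, 2, 3], name="line"), row=1, col=1)
fig.add_trace(go.Bar(y=[3, 2, 1], name="bars"), row=2, col=1)

# Select by subplot position:
for trace in fig.select_traces(row=2):
    print(trace.name)  # -> bars

# A string selector is shorthand for dict(type=...):
bars = list(fig.select_traces(selector="bar"))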
2
12
def _max_values(self) -> np.array:
    if self._max_vals_cache is not None:
        return self._max_vals_cache
    return np.array(list(map(lambda x: x.max_val, self.flat_scalars))).reshape(
        self.shape
    )
packages/syft/src/syft/core/tensor/autodp/intermediate_gamma.py
82
PySyft
{ "docstring": "WARNING: DO NOT MAKE THIS AVAILABLE TO THE POINTER!!!\n DO NOT ADD THIS METHOD TO THE AST!!!\n ", "language": "en", "n_whitespaces": 31, "n_words": 17, "vocab_size": 12 }
18
Python
16
f56f2b6fb96017472095a43f7d6b13bb8c21718f
intermediate_gamma.py
243
9
51
_max_values
https://github.com/OpenMined/PySyft.git
reduced reliance on .flat_scalars for __add__ which is very slow
68
0
74
15
4
16
def integrate(*args, meijerg=None, conds='piecewise', risch=None, heurisch=None,
        manual=None, **kwargs):
    doit_flags = {
        'deep': False,
        'meijerg': meijerg,
        'conds': conds,
        'risch': risch,
        'heurisch': heurisch,
        'manual': manual
    }
    integral = Integral(*args, **kwargs)

    if isinstance(integral, Integral):
        return integral.doit(**doit_flags)
    else:
        new_args = [a.doit(**doit_flags) if isinstance(a, Integral) else a
            for a in integral.args]
        return integral.func(*new_args)
sympy/integrals/integrals.py
190
sympy
{ "docstring": "integrate(f, var, ...)\n\n Explanation\n ===========\n\n Compute definite or indefinite integral of one or more variables\n using Risch-Norman algorithm and table lookup. This procedure is\n able to handle elementary algebraic and transcendental functions\n and also a huge class of special functions, including Airy,\n Bessel, Whittaker and Lambert.\n\n var can be:\n\n - a symbol -- indefinite integration\n - a tuple (symbol, a) -- indefinite integration with result\n given with ``a`` replacing ``symbol``\n - a tuple (symbol, a, b) -- definite integration\n\n Several variables can be specified, in which case the result is\n multiple integration. (If var is omitted and the integrand is\n univariate, the indefinite integral in that variable will be performed.)\n\n Indefinite integrals are returned without terms that are independent\n of the integration variables. (see examples)\n\n Definite improper integrals often entail delicate convergence\n conditions. Pass conds='piecewise', 'separate' or 'none' to have\n these returned, respectively, as a Piecewise function, as a separate\n result (i.e. result will be a tuple), or not at all (default is\n 'piecewise').\n\n **Strategy**\n\n SymPy uses various approaches to definite integration. One method is to\n find an antiderivative for the integrand, and then use the fundamental\n theorem of calculus. Various functions are implemented to integrate\n polynomial, rational and trigonometric functions, and integrands\n containing DiracDelta terms.\n\n SymPy also implements the part of the Risch algorithm, which is a decision\n procedure for integrating elementary functions, i.e., the algorithm can\n either find an elementary antiderivative, or prove that one does not\n exist. There is also a (very successful, albeit somewhat slow) general\n implementation of the heuristic Risch algorithm. This algorithm will\n eventually be phased out as more of the full Risch algorithm is\n implemented. See the docstring of Integral._eval_integral() for more\n details on computing the antiderivative using algebraic methods.\n\n The option risch=True can be used to use only the (full) Risch algorithm.\n This is useful if you want to know if an elementary function has an\n elementary antiderivative. If the indefinite Integral returned by this\n function is an instance of NonElementaryIntegral, that means that the\n Risch algorithm has proven that integral to be non-elementary. Note that\n by default, additional methods (such as the Meijer G method outlined\n below) are tried on these integrals, as they may be expressible in terms\n of special functions, so if you only care about elementary answers, use\n risch=True. Also note that an unevaluated Integral returned by this\n function is not necessarily a NonElementaryIntegral, even with risch=True,\n as it may just be an indication that the particular part of the Risch\n algorithm needed to integrate that function is not yet implemented.\n\n Another family of strategies comes from re-writing the integrand in\n terms of so-called Meijer G-functions. Indefinite integrals of a\n single G-function can always be computed, and the definite integral\n of a product of two G-functions can be computed from zero to\n infinity. Various strategies are implemented to rewrite integrands\n as G-functions, and use this information to compute integrals (see\n the ``meijerint`` module).\n\n The option manual=True can be used to use only an algorithm that tries\n to mimic integration by hand. 
This algorithm does not handle as many\n integrands as the other algorithms implemented but may return results in\n a more familiar form. The ``manualintegrate`` module has functions that\n return the steps used (see the module docstring for more information).\n\n In general, the algebraic methods work best for computing\n antiderivatives of (possibly complicated) combinations of elementary\n functions. The G-function methods work best for computing definite\n integrals from zero to infinity of moderately complicated\n combinations of special functions, or indefinite integrals of very\n simple combinations of special functions.\n\n The strategy employed by the integration code is as follows:\n\n - If computing a definite integral, and both limits are real,\n and at least one limit is +- oo, try the G-function method of\n definite integration first.\n\n - Try to find an antiderivative, using all available methods, ordered\n by performance (that is try fastest method first, slowest last; in\n particular polynomial integration is tried first, Meijer\n G-functions second to last, and heuristic Risch last).\n\n - If still not successful, try G-functions irrespective of the\n limits.\n\n The option meijerg=True, False, None can be used to, respectively:\n always use G-function methods and no others, never use G-function\n methods, or use all available methods (in order as described above).\n It defaults to None.\n\n Examples\n ========\n\n >>> from sympy import integrate, log, exp, oo\n >>> from sympy.abc import a, x, y\n\n >>> integrate(x*y, x)\n x**2*y/2\n\n >>> integrate(log(x), x)\n x*log(x) - x\n\n >>> integrate(log(x), (x, 1, a))\n a*log(a) - a + 1\n\n >>> integrate(x)\n x**2/2\n\n Terms that are independent of x are dropped by indefinite integration:\n\n >>> from sympy import sqrt\n >>> integrate(sqrt(1 + x), (x, 0, x))\n 2*(x + 1)**(3/2)/3 - 2/3\n >>> integrate(sqrt(1 + x), x)\n 2*(x + 1)**(3/2)/3\n\n >>> integrate(x*y)\n Traceback (most recent call last):\n ...\n ValueError: specify integration variables to integrate x*y\n\n Note that ``integrate(x)`` syntax is meant only for convenience\n in interactive sessions and should be avoided in library code.\n\n >>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise'\n Piecewise((gamma(a + 1), re(a) > -1),\n (Integral(x**a*exp(-x), (x, 0, oo)), True))\n\n >>> integrate(x**a*exp(-x), (x, 0, oo), conds='none')\n gamma(a + 1)\n\n >>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate')\n (gamma(a + 1), re(a) > -1)\n\n See Also\n ========\n\n Integral, Integral.doit\n\n ", "language": "en", "n_whitespaces": 1292, "n_words": 865, "vocab_size": 406 }
48
Python
43
1eeb01e15f06c6692a5bfd6fd2d2a3002d864a07
integrals.py
196,850
16
119
integrate
https://github.com/sympy/sympy.git
Fix a few docstring formatting issues
144
0
48,217
14
9
27
def get_cluster_status_to_report(gcs_client, num_retries) -> ClusterStatusToReport:
    try:
        cluster_status = ray._private.utils.internal_kv_get_with_retry(
            gcs_client,
            ray.ray_constants.DEBUG_AUTOSCALING_STATUS,
            namespace=None,
            num_retries=num_retries,
        )
        if not cluster_status:
            return ClusterStatusToReport()

        result = ClusterStatusToReport()
        to_GiB = 1 / 2 ** 30
        cluster_status = json.loads(cluster_status.decode("utf-8"))
        if (
            "load_metrics_report" not in cluster_status
            or "usage" not in cluster_status["load_metrics_report"]
        ):
            return ClusterStatusToReport()

        usage = cluster_status["load_metrics_report"]["usage"]
        # usage is a map from resource to (used, total) pair
        if "CPU" in usage:
            result.total_num_cpus = int(usage["CPU"][1])
        if "GPU" in usage:
            result.total_num_gpus = int(usage["GPU"][1])
        if "memory" in usage:
            result.total_memory_gb = usage["memory"][1] * to_GiB
        if "object_store_memory" in usage:
            result.total_object_store_memory_gb = (
                usage["object_store_memory"][1] * to_GiB
            )
        return result
    except Exception as e:
        logger.info(f"Failed to get cluster status to report {e}")
        return ClusterStatusToReport()
python/ray/_private/usage/usage_lib.py
337
ray
{ "docstring": "Get the current status of this cluster.\n\n It is a blocking API.\n\n Params:\n gcs_client (GCSClient): The GCS client to perform KV operation GET.\n num_retries (int): Max number of times to retry if GET fails.\n\n Returns:\n The current cluster status or empty if it fails to get that information.\n ", "language": "en", "n_whitespaces": 81, "n_words": 48, "vocab_size": 41 }
110
Python
70
62a5404369d71a84fdd4da9c4bfd597fce33f2f6
usage_lib.py
147,021
44
195
get_cluster_status_to_report
https://github.com/ray-project/ray.git
Collect more usage stats data (#23167)
396
0
33,835
15
1
2
def zoom(self):
    return self["zoom"]
packages/python/plotly/plotly/graph_objs/layout/_mapbox.py
22
plotly.py
{ "docstring": "\n Sets the zoom level of the map (mapbox.zoom).\n\n The 'zoom' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n int|float\n ", "language": "en", "n_whitespaces": 79, "n_words": 27, "vocab_size": 26 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_mapbox.py
231,602
2
11
zoom
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
63,046
7
2
14
def _set_locations(self) -> Dict[str, str]:
    default: str = self.loc["root"]

    temp: str = os.path.join(self.root, "ports.conf")
    if os.path.isfile(temp):
        listen = temp
        name = temp
    else:
        listen = default
        name = default

    return {"default": default, "listen": listen, "name": name}
certbot-apache/certbot_apache/_internal/parser.py
130
certbot
{ "docstring": "Set default location for directives.\n\n Locations are given as file_paths\n .. todo:: Make sure that files are included\n\n ", "language": "en", "n_whitespaces": 39, "n_words": 18, "vocab_size": 17 }
36
Python
26
7d9e9a49005de7961e84d2a7c608db57dbab3046
parser.py
186,683
16
77
_set_locations
https://github.com/certbot/certbot.git
Add typing to certbot.apache (#9071) * Add typing to certbot.apache Co-authored-by: Adrien Ferrand <[email protected]>
122
0
45,590
9
2
10
def get_valuation_rate():
    item_val_rate_map = {}

    for d in frappe.db.sql(
        """select item_code,
        sum(actual_qty*valuation_rate)/sum(actual_qty) as val_rate
        from tabBin where actual_qty > 0 group by item_code""",
        as_dict=1,
    ):
        item_val_rate_map.setdefault(d.item_code, d.val_rate)

    return item_val_rate_map
erpnext/stock/report/item_prices/item_prices.py
66
erpnext
{ "docstring": "Get an average valuation rate of an item from all warehousesselect item_code,\n\t\tsum(actual_qty*valuation_rate)/sum(actual_qty) as val_rate\n\t\tfrom tabBin where actual_qty > 0 group by item_code", "language": "en", "n_whitespaces": 21, "n_words": 24, "vocab_size": 22 }
16
Python
15
494bd9ef78313436f0424b918f200dab8fc7c20b
item_prices.py
67,881
10
40
get_valuation_rate
https://github.com/frappe/erpnext.git
style: format code with black
8
0
14,653
10
2
2
def test_order_by(self) -> None:
tests/rest/admin/test_federation.py
16
synapse
{ "docstring": "\n Testing order list with parameter `order_by`\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
4
Python
4
3b51c763ba5601e155e3e27a46cddf0370da83eb
test_federation.py
246,024
48
645
test_order_by
https://github.com/matrix-org/synapse.git
Fix get federation status of destination if no error occured (#11593)
11
0
70,953
6
10
21
def _get_locale_dirs(resources, include_core=True):
    contrib_dir = os.path.join(os.getcwd(), "django", "contrib")
    dirs = []

    # Collect all locale directories
    for contrib_name in os.listdir(contrib_dir):
        path = os.path.join(contrib_dir, contrib_name, "locale")
        if os.path.isdir(path):
            dirs.append((contrib_name, path))
            if contrib_name in HAVE_JS:
                dirs.append(("%s-js" % contrib_name, path))
    if include_core:
        dirs.insert(0, ("core", os.path.join(os.getcwd(), "django", "conf", "locale")))

    # Filter by resources, if any
    if resources is not None:
        res_names = [d[0] for d in dirs]
        dirs = [ld for ld in dirs if ld[0] in resources]
        if len(resources) > len(dirs):
            print(
                "You have specified some unknown resources. "
                "Available resource names are: %s" % (", ".join(res_names),)
            )
            exit(1)
    return dirs
scripts/manage_translations.py
315
django
{ "docstring": "\n Return a tuple (contrib name, absolute path) for all locale directories,\n optionally including the django core catalog.\n If resources list is not None, filter directories matching resources content.\n ", "language": "en", "n_whitespaces": 41, "n_words": 28, "vocab_size": 27 }
98
Python
72
9c19aff7c7561e3a82978a272ecdaad40dda5c00
manage_translations.py
206,916
21
191
_get_locale_dirs
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
267
0
51,797
16
11
27
def parse_sources(self, cache=False):
    parsed = False
    # allow for multiple inventory parsing
    for source in self._sources:
        if source:
            if ',' not in source:
                source = unfrackpath(source, follow=False)
            parse = self.parse_source(source, cache=cache)
            if parse and not parsed:
                parsed = True

    if parsed:
        # do post processing
        self._inventory.reconcile_inventory()
    else:
        if C.INVENTORY_UNPARSED_IS_FAILED:
            raise AnsibleError("No inventory was parsed, please check your configuration and options.")
        elif C.INVENTORY_UNPARSED_WARNING:
            display.warning("No inventory was parsed, only implicit localhost is available")

    for group in self.groups.values():
        group.vars = combine_vars(group.vars, get_vars_from_inventory_sources(self._loader, self._sources, [group], 'inventory'))
    for host in self.hosts.values():
        host.vars = combine_vars(host.vars, get_vars_from_inventory_sources(self._loader, self._sources, [host], 'inventory'))
lib/ansible/inventory/manager.py
272
ansible
{ "docstring": " iterate over inventory sources and parse each one to populate it", "language": "en", "n_whitespaces": 11, "n_words": 11, "vocab_size": 11 }
93
Python
63
5b44035983aba190791df479fa7004ce20872042
manager.py
266,994
20
169
parse_sources
https://github.com/ansible/ansible.git
Hide "[WARNING]: No inventory was parsed" message (#65499) * Add config option INVENTORY_UNPARSED_WARNING to hide the warning "No inventory was parsed, only implicit localhost is available"
339
0
78,680
15
1
28
def test_send_receipts_with_backoff(self):
    mock_send_transaction = (
        self.hs.get_federation_transport_client().send_transaction
    )
    mock_send_transaction.return_value = make_awaitable({})

    sender = self.hs.get_federation_sender()
    receipt = ReadReceipt(
        "room_id", "m.read", "user_id", ["event_id"], {"ts": 1234}
    )
    self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))

    self.pump()

    # expect a call to send_transaction
    mock_send_transaction.assert_called_once()
    json_cb = mock_send_transaction.call_args[0][1]
    data = json_cb()
    self.assertEqual(
        data["edus"],
        [
            {
                "edu_type": EduTypes.RECEIPT,
                "content": {
                    "room_id": {
                        "m.read": {
                            "user_id": {
                                "event_ids": ["event_id"],
                                "data": {"ts": 1234},
                            }
                        }
                    }
                },
            }
        ],
    )
    mock_send_transaction.reset_mock()

    # send the second RR
    receipt = ReadReceipt(
        "room_id", "m.read", "user_id", ["other_id"], {"ts": 1234}
    )
    self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))
    self.pump()
    mock_send_transaction.assert_not_called()

    self.reactor.advance(19)
    mock_send_transaction.assert_not_called()

    self.reactor.advance(10)
    mock_send_transaction.assert_called_once()
    json_cb = mock_send_transaction.call_args[0][1]
    data = json_cb()
    self.assertEqual(
        data["edus"],
        [
            {
                "edu_type": EduTypes.RECEIPT,
                "content": {
                    "room_id": {
                        "m.read": {
                            "user_id": {
                                "event_ids": ["other_id"],
                                "data": {"ts": 1234},
                            }
                        }
                    }
                },
            }
        ],
    )
tests/federation/test_federation_sender.py
519
synapse
{ "docstring": "Send two receipts in quick succession; the second should be flushed, but\n only after 20ms", "language": "en", "n_whitespaces": 21, "n_words": 15, "vocab_size": 15 }
119
Python
57
c52abc1cfdd9e5480cdb4a03d626fe61cacc6573
test_federation_sender.py
248,389
63
296
test_send_receipts_with_backoff
https://github.com/matrix-org/synapse.git
Additional constants for EDU types. (#12884) Instead of hard-coding strings in many places.
1,066
0
72,260
21
4
10
def register(self, name, color_list):
    if name in self._BUILTIN_COLOR_SEQUENCES:
        raise ValueError(f"{name!r} is a reserved name for a builtin "
                        "color sequence")

    color_list = list(color_list)  # force copy and coerce type to list
    for color in color_list:
        try:
            to_rgba(color)
        except ValueError:
            raise ValueError(
                f"{color!r} is not a valid color specification")

    self._color_sequences[name] = color_list
lib/matplotlib/colors.py
108
matplotlib
{ "docstring": "\n Register a new color sequence.\n\n The color sequence registry stores a copy of the given *color_list*, so\n that future changes to the original list do not affect the registered\n color sequence. Think of this as the registry taking a snapshot\n of *color_list* at registration.\n\n Parameters\n ----------\n name : str\n The name for the color sequence.\n\n color_list : list of colors\n An iterable returning valid Matplotlib colors when iterating over.\n Note however that the returned color sequence will always be a\n list regardless of the input type.\n\n ", "language": "en", "n_whitespaces": 201, "n_words": 86, "vocab_size": 58 }
51
Python
41
0abe0ce2f2748d1d0383154d045da3609a4b871b
colors.py
108,332
12
58
register
https://github.com/matplotlib/matplotlib.git
Add a registry for color sequences Color sequences are simply lists of colors, that we store by name in a registry. The registry is modelled similar to the ColormapRegistry to 1) support immutable builtin color sequences and 2) to return copies so that one cannot mess with the global definition of the color sequence through an obtained instance. For now, I've made the sequences used for `ListedColormap`s available as builtin sequences, but that's open for discussion. More usage documentation should be added in the color examples and/or tutorials, but I'll wait with that till after the general approval of the structure and API. One common use case will be ``` plt.rc_params['axes.prop_cycle'] = plt.cycler(color=plt.color_sequences['Pastel1') ``` Co-authored-by: Elliott Sales de Andrade <[email protected]>
197
0
23,144
14
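A sketch of how the registry above can be used; it assumes the registry is exposed as matplotlib.color_sequences, which the commit message's `plt.color_sequences` example suggests:

import matplotlib as mpl
import matplotlib.pyplot as plt

# The registry stores a copy, so later mutations of `palette` have no effect.
palette = ["#335C67", "#FFF3B0", "#E09F3E"]
mpl.color_sequences.register("house_style", palette)  # "house_style" is our own name

# Use the registered sequence for the default property cycle:
plt.rcParams["axes.prop_cycle"] = plt.cycler(color=mpl.color_sequences["house_style"])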
1
6
def quantize_output(self, output, config, **kwargs):
    # Put your code to generate `new_output` here
    new_output = ...
    return new_output
docs/source/tutorials/quantization_customize.py
30
nni
{ "docstring": "\n quantize should overload this method to quantize output.\n This method is effectively hooked to `:meth:`forward` of the model.\n\n Parameters\n ----------\n output : Tensor\n output that needs to be quantized\n config : dict\n the configuration for output quantization\n ", "language": "en", "n_whitespaces": 109, "n_words": 37, "vocab_size": 29 }
18
Python
17
a16212368718dccf6e3e07f0d9da950a365a3f90
quantization_customize.py
111,927
3
18
quantize_output
https://github.com/microsoft/nni.git
update customize compressor (#4639)
46
0
24,510
6
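The method above is a template that leaves the actual quantization as `...`. One possible way to fill it in is uniform fake quantization, sketched below under the assumption of a PyTorch tensor and a `quant_bits` entry in `config`; this is an illustration, not NNI's built-in behaviour:

import torch

def quantize_output(self, output, config, **kwargs):
    # Fake-quantize: scale to integer levels, round, then de-quantize, so the
    # rest of the network sees the precision loss while staying in float.
    bits = config.get("quant_bits", 8)
    qmax = 2 ** bits - 1
    zero_point = output.min()
    scale = (output.max() - zero_point).clamp(min=1e-8) / qmax
    new_output = torch.round((output - zero_point) / scale) * scale + zero_point
    return new_output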
1
9
async def connect(self) -> None:
    self.client = WebOsClient(self.host, self.client_key)
    with suppress(*WEBOSTV_EXCEPTIONS, WebOsTvPairError):
        await self.client.connect()
homeassistant/components/webostv/__init__.py
68
core
{ "docstring": "Attempt a connection, but fail gracefully if tv is off for example.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
14
Python
14
dee843bf6e5ca84a94f336a239f6a6138c4c28e6
__init__.py
309,593
5
39
connect
https://github.com/home-assistant/core.git
Add LG webOS Smart TV config flow support (#64117) * Add webOS Smart TV config flow support (#53256) * Add Webostv config flow * Fix tests mocks and apply review comments * Apply review comments * Change config flow to use ssdp UDN as unique_id * Fix device info * More review comments * Fix _async_check_configured_entry * Remove turn on script * Add webOS Smart TV device triggers (#53752) * Add webOS Smart TV config flow support (#53256) * Add Webostv config flow * Fix tests mocks and apply review comments * Apply review comments * Change config flow to use ssdp UDN as unique_id * Fix device info * More review comments * Fix _async_check_configured_entry * Remove turn on script * Add webOS Smart TV device triggers (#53752) * Fix webOS Smart TV mypy and pylint errors (#62620) * Change webOS Smart TV PyPi aiopylgtv package to bscpylgtv (#62633) * Change webOS Smart TV PyPi aiopylgtv package to bscpylgtv * Update bscpylgtv to 0.2.8 (revised websockets requirment) * Change webOS Smart TV PyPi package to aiowebostv (#63759) * Change webOS Smart TV PyPi package to aiowebostv * Apply suggestions from code review Co-authored-by: Martin Hjelmare <[email protected]> Co-authored-by: Martin Hjelmare <[email protected]> * webOS TV check UUID for user added device (#63817) * webOS TV check uuid when for user added device * Apply suggestions from code review Co-authored-by: Martin Hjelmare <[email protected]> * Add test for form abort and host update Co-authored-by: Martin Hjelmare <[email protected]> * Rework webOS Smart TV device trigger to custom trigger platform (#63950) * Rework webOS Smart TV device trigger to custom trigger platform * Review comments and add tests * Fix webOS TV import from YAML (#63996) * Fix webOS TV import from YAML * Fix requirements * Migrate YAML entities unique id to UUID * Add backoff to migration task delay * Assert result data and unique_id * Apply suggestions from code review Co-authored-by: Martin Hjelmare <[email protected]> * Add codeowner Co-authored-by: Martin Hjelmare <[email protected]>
46
0
108,291
11
8
44
def _inject_greasemonkey_scripts(self, scripts):
    if sip.isdeleted(self._widget):
        return

    # Since we are inserting scripts into a per-tab collection,
    # rather than just injecting scripts on page load, we need to
    # make sure we replace existing scripts, not just add new ones.
    # While, taking care not to remove any other scripts that might
    # have been added elsewhere, like the one for stylesheets.
    page_scripts = self._widget.page().scripts()
    self._remove_all_greasemonkey_scripts()

    seen_names = set()
    for script in scripts:
        while script.full_name() in seen_names:
            script.dedup_suffix += 1
        seen_names.add(script.full_name())

        new_script = QWebEngineScript()

        try:
            world = int(script.jsworld)
            if not 0 <= world <= qtutils.MAX_WORLD_ID:
                log.greasemonkey.error(
                    f"script {script.name} has invalid value for '@qute-js-world'"
                    f": {script.jsworld}, should be between 0 and "
                    f"{qtutils.MAX_WORLD_ID}")
                continue
        except ValueError:
            try:
                world = _JS_WORLD_MAP[usertypes.JsWorld[script.jsworld.lower()]]
            except KeyError:
                log.greasemonkey.error(
                    f"script {script.name} has invalid value for '@qute-js-world'"
                    f": {script.jsworld}")
                continue
        new_script.setWorldId(world)

        # Corresponds to "@run-at document-end" which is the default according to
        # https://wiki.greasespot.net/Metadata_Block#.40run-at - however,
        # QtWebEngine uses QWebEngineScript.InjectionPoint.Deferred (@run-at document-idle) as
        # default.
        #
        # NOTE that this needs to be done before setSourceCode, so that
        # QtWebEngine's parsing of GreaseMonkey tags will override it if there is a
        # @run-at comment.
        new_script.setInjectionPoint(QWebEngineScript.InjectionPoint.DocumentReady)

        new_script.setSourceCode(script.code())
        new_script.setName(script.full_name())
        new_script.setRunsOnSubFrames(script.runs_on_sub_frames)

        if script.needs_document_end_workaround():
            log.greasemonkey.debug(
                f"Forcing @run-at document-end for {script.name}")
            new_script.setInjectionPoint(QWebEngineScript.InjectionPoint.DocumentReady)

        log.greasemonkey.debug(f'adding script: {new_script.name()}')
        page_scripts.insert(new_script)
qutebrowser/browser/webengine/webenginetab.py
453
qutebrowser
{ "docstring": "Register user JavaScript files with the current tab.\n\n Args:\n scripts: A list of GreasemonkeyScripts.\n ", "language": "en", "n_whitespaces": 39, "n_words": 14, "vocab_size": 14 }
203
Python
145
0877fb0d78635692e481c8bde224fac5ad0dd430
webenginetab.py
321,142
38
232
_inject_greasemonkey_scripts
https://github.com/qutebrowser/qutebrowser.git
Run scripts/dev/rewrite_enums.py
856
0
117,562
19
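For context, registering a single script with QtWebEngine outside of qutebrowser looks roughly like this (a sketch assuming PyQt6 and an existing QWebEnginePage named `page`):

from PyQt6.QtWebEngineCore import QWebEngineScript

script = QWebEngineScript()
script.setName("example.user.js")
script.setSourceCode("console.log('hello from an injected script');")
# Run once the DOM is ready, in the page's main JavaScript world.
script.setInjectionPoint(QWebEngineScript.InjectionPoint.DocumentReady)
script.setWorldId(QWebEngineScript.ScriptWorldId.MainWorld)
script.setRunsOnSubFrames(False)
page.scripts().insert(script)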
3
15
def get_heading(self, queryset, field):
    heading_override = self.export_headings.get(field)
    if heading_override:
        return force_str(heading_override)
    try:
        return force_str(queryset.model._meta.get_field(field).verbose_name.title())
    except (AttributeError, FieldDoesNotExist):
        return force_str(field)
wagtail/admin/views/mixins.py
100
wagtail
{ "docstring": "Get the heading label for a given field for a spreadsheet generated from queryset", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 12 }
19
Python
17
d10f15e55806c6944827d801cd9c2d53f5da4186
mixins.py
72,431
8
62
get_heading
https://github.com/wagtail/wagtail.git
Reformat with black
87
0
15,894
16
8
27
def to_euler(self, seq, angle_addition=True, avoid_square_root=False):
    extrinsic = _is_extrinsic(seq)
    i, j, k = seq.lower()

    # get index corresponding to elementary basis vectors
    i = 'xyz'.index(i) + 1
    j = 'xyz'.index(j) + 1
    k = 'xyz'.index(k) + 1

    if not extrinsic:
        i, k = k, i

    # check if sequence is symmetric
    symmetric = i == k
    if symmetric:
        k = 6 - i - j

    # parity of the permutation
    sign = (i - j) * (j - k) * (k - i) // 2

    # permutate elements
    elements = [self.a, self.b, self.c, self.d]
    a = elements[0]
    b = elements[i]
    c = elements[j]
    d = elements[k] * sign

    if not symmetric:
        a, b, c, d = a - c, b + d, c + a, d - b

    if avoid_square_root:
        angle_j = acos((a*a + b*b - c*c - d*d) / self.norm()**2)
    else:
        angle_j = 2 * atan2(sqrt(c * c + d * d), sqrt(a * a + b * b))

    if angle_addition:
        angle_i = atan2(b, a) + atan2(d, c)
        angle_k = atan2(b, a) - atan2(d, c)
    else:
        angle_i = atan2(b*c + a*d, a*c - b*d)
        angle_k = atan2(b*c - a*d, a*c + b*d)

    # for Tait-Bryan angles
    if not symmetric:
        angle_j -= pi / 2
        angle_i *= sign

    if extrinsic:
        return angle_k, angle_j, angle_i
    else:
        return angle_i, angle_j, angle_k
sympy/algebras/quaternion.py
545
sympy
{ "docstring": "Returns Euler angles representing same rotation as the quaternion,\n in the sequence given by `seq`. This implements the method described\n in [1]_.\n\n Parameters\n ==========\n\n seq : string of length 3\n Represents the sequence of rotations.\n For intrinsic rotations, seq must be all lowercase and its elements\n must be from the set `{'x', 'y', 'z'}`\n For extrinsic rotations, seq must be all uppercase and its elements\n must be from the set `{'X', 'Y', 'Z'}`\n\n angle_addition : bool\n Default : True\n When True, first and third angles are given as an addition and\n subtraction of two simpler `atan2` expressions. When False, the first\n and third angles are each given by a single more complicated\n `atan2` expression. This equivalent is given by:\n\n --math::\n \\operatorname{atan_2} (b,a) \\pm \\operatorname{atan_2} (d,c) =\n \\operatorname{atan_2} (bc\\pm ad, ac\\mp bd)\n\n avoid_square_root : bool\n Default : False\n When True, the second angle is calculated with an expression based on\n `acos`, which is slightly more complicated but avoids a square\n root. When False, second angle is calculated with `atan2`, which\n is simpler and can be better for numerical reasons (some\n numerical implementations of `acos` have problems near zero).\n\n\n Returns\n =======\n\n Tuple\n The Euler angles calculated from the quaternion\n\n Examples\n ========\n\n >>> from sympy import Quaternion\n >>> from sympy.abc import a, b, c, d\n >>> euler = Quaternion(a, b, c, d).to_euler('zyz')\n >>> euler\n (-atan2(-b, c) + atan2(d, a),\n 2*atan2(sqrt(b**2 + c**2), sqrt(a**2 + d**2)),\n atan2(-b, c) + atan2(d, a))\n\n\n References\n ==========\n\n .. [1] https://doi.org/10.1371/journal.pone.0276302\n\n ", "language": "en", "n_whitespaces": 633, "n_words": 242, "vocab_size": 147 }
218
Python
105
c254b28e2ba0f4cdfe9ff6523ffa6e369ab415ce
quaternion.py
200,702
36
349
to_euler
https://github.com/sympy/sympy.git
added two options to to_euler
557
0
49,777
16
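A quick sanity check of the routine above: for a rotation of pi/2 about the z axis, an intrinsic 'zyx' (Tait-Bryan) decomposition should put the whole rotation into the first angle:

from sympy import Quaternion, cos, sin, pi

q = Quaternion(cos(pi / 4), 0, 0, sin(pi / 4))  # rotation of pi/2 about z
print(q.to_euler('zyx'))  # expected to be equivalent to (pi/2, 0, 0)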
4
19
def _update_class_log_prior(self, class_prior=None):
    n_classes = len(self.classes_)
    if class_prior is not None:
        if len(class_prior) != n_classes:
            raise ValueError("Number of priors must match number of classes.")
        self.class_log_prior_ = np.log(class_prior)
    elif self.fit_prior:
        with warnings.catch_warnings():
            # silence the warning when count is 0 because class was not yet
            # observed
            warnings.simplefilter("ignore", RuntimeWarning)
            log_class_count = np.log(self.class_count_)

        # empirical prior, with sample_weight taken into account
        self.class_log_prior_ = log_class_count - np.log(self.class_count_.sum())
    else:
        self.class_log_prior_ = np.full(n_classes, -np.log(n_classes))
sklearn/naive_bayes.py
195
scikit-learn
{ "docstring": "Update class log priors.\n\n The class log priors are based on `class_prior`, class count or the\n number of classes. This method is called each time `fit` or\n `partial_fit` update the model.\n ", "language": "en", "n_whitespaces": 59, "n_words": 31, "vocab_size": 26 }
69
Python
55
b6bc5599ae063a90c27c9c1fef1940d620b5a206
naive_bayes.py
259,009
13
115
_update_class_log_prior
https://github.com/scikit-learn/scikit-learn.git
DOC Clarifies comments and docstrings in _BaseDiscreteNB (#22565) Co-authored-by: Thomas J. Fan <[email protected]>
245
0
75,522
15
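The empirical branch above is just log(count_c / total_count), written as a difference of logs for numerical convenience; a small NumPy illustration:

import numpy as np

class_count = np.array([30.0, 60.0, 10.0])  # per-class (possibly weighted) counts
log_prior = np.log(class_count) - np.log(class_count.sum())
assert np.allclose(log_prior, np.log(class_count / class_count.sum()))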
4
15
def change_member_label(self, label, new_label):
    if label not in self._member_labels:
        raise ValueError("No such member exists for the Truss")
    else:
        members_duplicate = self._members.copy()
        for member in members_duplicate:
            if member[0] == label:
                self._member_labels[self.member_labels.index(member[0])] = new_label
                self._member_nodes[new_label] = [self._member_nodes[label][0], self._member_nodes[label][1]]
                self._member_nodes.pop(label)
                self._internal_forces[new_label] = self._internal_forces[label]
                self._internal_forces.pop(label)
                self._members[self._members.index([label, member[1], member[2]])] = [new_label, member[1], member[2]]
sympy/physics/continuum_mechanics/truss.py
241
sympy
{ "docstring": "\n This method changes the label of a member.\n\n Parameters\n ==========\n label: String or Symbol\n The label of the member for which the label has\n to be changed.\n\n new_label: String or Symbol\n The new label of the member.\n\n Examples\n ========\n\n >>> from sympy.physics.continuum_mechanics.truss import Truss\n >>> t = Truss()\n >>> t.add_node('A', 0, 0)\n >>> t.add_node('B', 3, 0)\n >>> t.nodes\n [('A', 0, 0), ('B', 3, 0)]\n >>> t.change_node_label('A', 'C')\n >>> t.nodes\n [('C', 0, 0), ('B', 3, 0)]\n >>> t.add_member('BC', 'B', 'C')\n >>> t.members\n [['BC', 'B', 'C']]\n >>> t.change_member_label('BC', 'BC_new')\n >>> t.members\n [['BC_new', 'B', 'C']]\n ", "language": "en", "n_whitespaces": 287, "n_words": 92, "vocab_size": 55 }
48
Python
39
99ede53223eafb56b2c2b4ab7b8a6764b628c9d9
truss.py
198,535
13
161
change_member_label
https://github.com/sympy/sympy.git
remove_load method added along with other changes
231
0
48,987
18
1
2
def multiselect(self):
    return self["multiselect"]
packages/python/plotly/plotly/graph_objs/parcoords/_dimension.py
22
plotly.py
{ "docstring": "\n Do we allow multiple selection ranges or just a single range?\n\n The 'multiselect' property must be specified as a bool\n (either True, or False)\n\n Returns\n -------\n bool\n ", "language": "en", "n_whitespaces": 77, "n_words": 27, "vocab_size": 24 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_dimension.py
233,094
2
11
multiselect
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
64,538
7
9
18
def _ask_default(self, default=''):
    self.prompt_output.write('Please enter the default value as valid Python.')
    if default:
        self.prompt_output.write(
            f"Accept the default '{default}' by pressing 'Enter' or "
            f"provide another value."
        )
    self.prompt_output.write(
        'The datetime and django.utils.timezone modules are available, so '
        'it is possible to provide e.g. timezone.now as a value.'
    )
    self.prompt_output.write("Type 'exit' to exit this prompt")
    while True:
        if default:
            prompt = "[default: {}] >>> ".format(default)
        else:
            prompt = ">>> "
        self.prompt_output.write(prompt, ending='')
        code = input()
        if not code and default:
            code = default
        if not code:
            self.prompt_output.write("Please enter some code, or 'exit' (without quotes) to exit.")
        elif code == "exit":
            sys.exit(1)
        else:
            try:
                return eval(code, {}, {'datetime': datetime, 'timezone': timezone})
            except (SyntaxError, NameError) as e:
                self.prompt_output.write('Invalid input: %s' % e)
django/db/migrations/questioner.py
288
django
{ "docstring": "\n Prompt for a default value.\n\n The ``default`` argument allows providing a custom default value (as a\n string) which will be shown to the user and used as the return value\n if the user doesn't provide any other input.\n ", "language": "en", "n_whitespaces": 74, "n_words": 38, "vocab_size": 31 }
119
Python
91
0ab58c120939093fea90822f376e1866fc714d1f
questioner.py
202,903
30
158
_ask_default
https://github.com/django/django.git
Refs #29026 -- Allowed customizing InteractiveMigrationQuestioner's prompt destination. Previously, the questioner did not obey the value of stdout provided to the command.
473
0
50,164
17
3
21
def item_group_query(doctype, txt, searchfield, start, page_len, filters):
    item_groups = []
    cond = "1=1"
    pos_profile = filters.get("pos_profile")

    if pos_profile:
        item_groups = get_item_groups(pos_profile)

        if item_groups:
            cond = "name in (%s)" % (", ".join(["%s"] * len(item_groups)))
            cond = cond % tuple(item_groups)

    return frappe.db.sql(
        """ select distinct name from `tabItem Group`
            where {condition} and (name like %(txt)s) limit {start}, {page_len}""".format(
            condition=cond, start=start, page_len=page_len
        ),
        {"txt": "%%%s%%" % txt},
    )


@frappe.whitelist()
erpnext/selling/page/point_of_sale/point_of_sale.py
181
@frappe.whitelist()
erpnext
{ "docstring": " select distinct name from `tabItem Group`\n\t\t\twhere {condition} and (name like %(txt)s) limit {start}, {page_len}", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 15 }
51
Python
39
494bd9ef78313436f0424b918f200dab8fc7c20b
point_of_sale.py
67,383
16
102
item_group_query
https://github.com/frappe/erpnext.git
style: format code with black
35
1
14,511
17
11
28
def load_config(self, modelpath, use_gpu, gpu_id, use_mkldnn, cpu_threads):
    # Configure where the model runs (GPU or CPU)
    if use_gpu:
        try:
            int(os.environ.get('CUDA_VISIBLE_DEVICES'))
        except Exception:
            print(
                'Error! Unable to use GPU. Please set the environment variables '
                '"CUDA_VISIBLE_DEVICES=GPU_id" to use GPU. Now switch to CPU to continue...')
            use_gpu = False

    if os.path.isdir(modelpath):
        if os.path.exists(os.path.join(modelpath, "__params__")):
            # __model__ + __params__
            model = os.path.join(modelpath, "__model__")
            params = os.path.join(modelpath, "__params__")
            config = Config(model, params)
        elif os.path.exists(os.path.join(modelpath, "params")):
            # model + params
            model = os.path.join(modelpath, "model")
            params = os.path.join(modelpath, "params")
            config = Config(model, params)
        elif os.path.exists(os.path.join(modelpath, "__model__")):
            # __model__ + others
            config = Config(modelpath)
        else:
            raise Exception(
                "Error! Can\'t find the model in: %s. Please check your model path." %
                os.path.abspath(modelpath))
    elif os.path.exists(modelpath + ".pdmodel"):
        # *.pdmodel + *.pdiparams
        model = modelpath + ".pdmodel"
        params = modelpath + ".pdiparams"
        config = Config(model, params)
    elif isinstance(modelpath, Config):
        config = modelpath
    else:
        raise Exception(
            "Error! Can\'t find the model in: %s. Please check your model path." %
            os.path.abspath(modelpath))

    # Set runtime parameters
    if use_gpu:
        config.enable_use_gpu(100, gpu_id)
    else:
        config.disable_gpu()
        config.set_cpu_math_library_num_threads(cpu_threads)
    if use_mkldnn:
        config.enable_mkldnn()
    config.disable_glog_info()

    # Return the config
    return config

# Predictor creation function
modules/image/keypoint_detection/hand_pose_localization/model.py
496
PaddleHub
{ "docstring": "\r\n load the model config\r\n modelpath: inference model path\r\n use_gpu: use gpu or not\r\n use_mkldnn: use mkldnn or not\r\n Error! Unable to use GPU. Please set the environment variables \"CUDA_VISIBLE_DEVICES=GPU_id\" to use GPU. Now switch to CPU to continue...", "language": "en", "n_whitespaces": 73, "n_words": 38, "vocab_size": 27 }
151
Python
76
6b42963d62833925ffed1cdb73400e7d528a5353
model.py
50,991
40
291
load_config
https://github.com/PaddlePaddle/PaddleHub.git
update hand_pose_localization (#1967) * update hand_pose_localization * add clean func
699
0
10,250
16
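The trailing comment announces a predictor creation function; with a fully-built Config, that step typically reduces to paddle.inference.create_predictor. A hedged sketch (the function name and return shape are assumptions, not this module's actual code):

from paddle.inference import create_predictor

def load_predictor(config):
    # Build a predictor from the configured Config and expose its input names.
    predictor = create_predictor(config)
    return predictor, predictor.get_input_names()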
1
13
def call(cls, y_true, y_pred):
    diff = K.abs(y_true - y_pred)
    max_loss = K.max(diff, axis=(1, 2), keepdims=True)
    loss = K.mean(max_loss, axis=-1)
    return loss
lib/model/losses_tf.py
84
faceswap
{ "docstring": " Call the L-inf norm loss function.\n\n Parameters\n ----------\n y_true: tensor or variable\n The ground truth value\n y_pred: tensor or variable\n The predicted value\n\n Returns\n -------\n tensor\n The loss value\n ", "language": "en", "n_whitespaces": 119, "n_words": 29, "vocab_size": 20 }
21
Python
18
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
losses_tf.py
100,356
5
55
call
https://github.com/deepfakes/faceswap.git
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
56
0
19,845
10
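A NumPy restatement of the loss above, assuming NHWC batches as the axis=(1, 2) reduction implies: take the worst absolute error per channel, then average across channels:

import numpy as np

def linf_loss(y_true, y_pred):
    diff = np.abs(y_true - y_pred)                   # per-pixel absolute error
    max_loss = diff.max(axis=(1, 2), keepdims=True)  # worst pixel per channel
    return max_loss.mean(axis=-1)                    # average over channels

y_true = np.zeros((2, 4, 4, 3))
y_pred = np.full((2, 4, 4, 3), 0.5)
print(linf_loss(y_true, y_pred).squeeze())  # [0.5 0.5]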
1
29
def test_column_feature_type_mismatch_fill():
    cat_feat = category_feature()
    bin_feat = binary_feature()
    input_features = [cat_feat]
    output_features = [bin_feat]
    config = {"input_features": input_features, "output_features": output_features}

    # Construct dataframe with int-like column representing a categorical feature
    df = pd.DataFrame(
        {
            cat_feat[NAME]: pd.Series(pd.array([None] + [1] * 24, dtype=pd.Int64Dtype())),
            bin_feat[NAME]: pd.Series([True] * 25),
        }
    )

    # run preprocessing
    backend = LocalTestBackend()
    ludwig_model = LudwigModel(config, backend=backend)
    train_ds, val_ds, test_ds, _ = ludwig_model.preprocess(dataset=df)


@pytest.mark.parametrize("format", ["file", "df"])
tests/integration_tests/test_preprocessing.py
230
@pytest.mark.parametrize("format", ["file", "df"])
ludwig
{ "docstring": "Tests that we are able to fill missing values even in columns where the column dtype and desired feature\n dtype do not match.", "language": "en", "n_whitespaces": 25, "n_words": 23, "vocab_size": 22 }
66
Python
56
1e6dbeff57fc5065b97dd018b904b9907468676f
test_preprocessing.py
7,805
15
125
test_column_feature_type_mismatch_fill
https://github.com/ludwig-ai/ludwig.git
Treat dataset columns as object dtype during first pass of handle_missing_values (#2398)
140
1
1,273
17
1
5
def _numel_arraylike(x, **kwargs):
    return _numel(x, coerce_np_ndarray=False, **kwargs)
dask/array/backends.py
34
dask
{ "docstring": "Numel implementation for arrays that want to return numel of the same type.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
7
Python
7
8b95f983c232c1bd628e9cba0695d3ef229d290b
backends.py
156,880
2
21
_numel_arraylike
https://github.com/dask/dask.git
Sparse array reductions (#9342)
13
0
36,797
8
1
5
def _new_step(self):
    self.should_save = False
    self.should_evaluate = False
    self.should_log = False
paddlenlp/trainer/trainer_callback.py
37
PaddleNLP
{ "docstring": "Internal method that resets the variable for a new step.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
11
Python
7
44a290e94d1becd1f09fddc3d873f9e19c9d6919
trainer_callback.py
323,153
4
21
_new_step
https://github.com/PaddlePaddle/PaddleNLP.git
[Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761) * add some datasets for finetune. * support fine tune for all tastks. * add trainer prototype. * init verison for paddlenlp trainer. * refine trainer. * update for some details. * support multi-cards training evaluation. * support load from ckpt. * support for export inference model. * first version of trainer. * seq cls support clue. * trainer support for token classification and question answersing tasks. * fix as reviews. Co-authored-by: Zeyu Chen <[email protected]>
39
0
118,389
7
1
29
def test_realm_quota(self) -> None:
    self.login("hamlet")

    d1 = StringIO("zulip!")
    d1.name = "dummy_1.txt"
    result = self.client_post("/json/user_uploads", {"file": d1})
    response_dict = self.assert_json_success(result)
    d1_path_id = re.sub("/user_uploads/", "", response_dict["uri"])
    d1_attachment = Attachment.objects.get(path_id=d1_path_id)

    realm = get_realm("zulip")
    realm.upload_quota_gb = 1
    realm.save(update_fields=["upload_quota_gb"])

    # The size of StringIO("zulip!") is 6 bytes. Setting the size of
    # d1_attachment to realm.upload_quota_bytes() - 11 should allow
    # us to upload only one more attachment.
    quota = realm.upload_quota_bytes()
    assert quota is not None
    d1_attachment.size = quota - 11
    d1_attachment.save(update_fields=["size"])

    d2 = StringIO("zulip!")
    d2.name = "dummy_2.txt"
    result = self.client_post("/json/user_uploads", {"file": d2})
    self.assert_json_success(result)

    d3 = StringIO("zulip!")
    d3.name = "dummy_3.txt"
    result = self.client_post("/json/user_uploads", {"file": d3})
    self.assert_json_error(result, "Upload would exceed your organization's upload quota.")

    realm.upload_quota_gb = None
    realm.save(update_fields=["upload_quota_gb"])

    result = self.client_post("/json/user_uploads", {"file": d3})
    self.assert_json_success(result)
zerver/tests/test_upload.py
402
zulip
{ "docstring": "\n Realm quota for uploading should not be exceeded.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
117
Python
69
a142fbff85302c5e3acb2e204eca2e9c75dbc74b
test_upload.py
84,150
30
223
test_realm_quota
https://github.com/zulip/zulip.git
tests: Refactor away result.json() calls with helpers. Signed-off-by: Zixuan James Li <[email protected]>
327
0
17,789
11
1
2
def readable(self):
    return False
python3.10.4/Lib/_pyio.py
16
XX-Net
{ "docstring": "Return a bool indicating whether object was opened for reading.\n\n If False, read() will raise OSError.\n ", "language": "en", "n_whitespaces": 30, "n_words": 16, "vocab_size": 16 }
4
Python
4
8198943edd73a363c266633e1aa5b2a9e9c9f526
_pyio.py
219,890
2
8
readable
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
18
0
55,883
6
3
13
async def get_object_refs_from_last_execute(self) -> Dict[str, Any]:
    cache = {}
    for node_uuid, value in self.cache_from_last_execute.items():
        if isinstance(value, asyncio.Task):
            cache[node_uuid] = await value
        else:
            cache[node_uuid] = value
    return cache
python/ray/dag/dag_node.py
92
ray
{ "docstring": "Gets cached object refs from the last call to execute().\n\n After this DAG is executed through execute(), retrieves a map between node\n UUID to a reference to the return value of the default executor on that node.\n ", "language": "en", "n_whitespaces": 58, "n_words": 37, "vocab_size": 32 }
27
Python
21
4c970cc88247f7cfa7351297b8b5050f2372742e
dag_node.py
127,340
13
57
get_object_refs_from_last_execute
https://github.com/ray-project/ray.git
[serve] Visualize Deployment Graph with Gradio (#27897)
107
0
28,418
12
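The await-if-task pattern above, isolated into a generic helper (a sketch, not Ray-specific):

import asyncio
from typing import Any, Dict

async def resolve_cache(cache_in: Dict[str, Any]) -> Dict[str, Any]:
    # Await entries that are still in-flight tasks; pass plain values through.
    return {
        key: (await value) if isinstance(value, asyncio.Task) else value
        for key, value in cache_in.items()
    }

async def main():
    pending = asyncio.ensure_future(asyncio.sleep(0, result="done"))
    print(await resolve_cache({"a": pending, "b": 123}))  # {'a': 'done', 'b': 123}

asyncio.run(main())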
1
2
def editrevision(self):
    return self["editrevision"]
packages/python/plotly/plotly/graph_objs/_layout.py
22
plotly.py
{ "docstring": "\n Controls persistence of user-driven changes in `editable: true`\n configuration, other than trace names and axis titles. Defaults\n to `layout.uirevision`.\n\n The 'editrevision' property accepts values of any type\n\n Returns\n -------\n Any\n ", "language": "en", "n_whitespaces": 87, "n_words": 30, "vocab_size": 29 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_layout.py
227,325
2
11
editrevision
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
58,998
7
5
7
def RGS_generalized(m):
    d = zeros(m + 1)
    for i in range(m + 1):
        d[0, i] = 1

    for i in range(1, m + 1):
        for j in range(m):
            if j <= m - i:
                d[i, j] = j * d[i - 1, j] + d[i - 1, j + 1]
            else:
                d[i, j] = 0
    return d
sympy/combinatorics/partitions.py
148
sympy
{ "docstring": "\n Computes the m + 1 generalized unrestricted growth strings\n and returns them as rows in matrix.\n\n Examples\n ========\n\n >>> from sympy.combinatorics.partitions import RGS_generalized\n >>> RGS_generalized(6)\n Matrix([\n [ 1, 1, 1, 1, 1, 1, 1],\n [ 1, 2, 3, 4, 5, 6, 0],\n [ 2, 5, 10, 17, 26, 0, 0],\n [ 5, 15, 37, 77, 0, 0, 0],\n [ 15, 52, 151, 0, 0, 0, 0],\n [ 52, 203, 0, 0, 0, 0, 0],\n [203, 0, 0, 0, 0, 0, 0]])\n ", "language": "en", "n_whitespaces": 162, "n_words": 81, "vocab_size": 46 }
57
Python
32
7d773eb18daaef3c54f34d1ac6cbc5b83a5bb16c
partitions.py
198,361
11
99
RGS_generalized
https://github.com/sympy/sympy.git
Cleanup loops and ranges
138
0
48,881
16
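A usage note grounded in the docstring's example matrix: the first column (1, 1, 2, 5, 15, 52, 203) is the Bell numbers, i.e. the total count of unrestricted growth strings of each length:

from sympy.combinatorics.partitions import RGS_generalized

d = RGS_generalized(6)
print([d[i, 0] for i in range(7)])  # [1, 1, 2, 5, 15, 52, 203] -- Bell numbers B_0..B_6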
1
4
def _draw_rasterized(figure, artists, renderer):
lib/matplotlib/axes/_base.py
17
matplotlib
{ "docstring": "\n A helper function for rasterizing the list of artists.\n\n The bookkeeping to track if we are or are not in rasterizing mode\n with the mixed-mode backends is relatively complicated and is now\n handled in the matplotlib.artist.allow_rasterization decorator.\n\n This helper defines the absolute minimum methods and attributes on\n shim class to be compatible with that decorator and the uses it to\n rasterize the list of artists.\n\n This is maybe too-clever, but allows us to re-use the same code that is\n used on normal artists to participate in the \"are we rasterizing\"\n accounting.\n\n Please do not use this outside of the \"rasterize below a given zorder\"\n functionality of Axes.\n\n Parameters\n ----------\n figure : matplotlib.figure.Figure\n The figure all of the artists belong to (not checked). We need this\n because we can at the figure level suppress composition and insert each\n rasterized artist as it's own image.\n\n artists : List[matplotlib.artist.Artist]\n The list of Artists to be rasterized. These are assumed to all\n be in the same Figure.\n\n renderer : matplotlib.backendbases.RendererBase\n The currently active renderer\n\n Returns\n -------\n None\n\n ", "language": "en", "n_whitespaces": 281, "n_words": 173, "vocab_size": 112 }
4
Python
4
eb52a34559bad8e86c85069e5af15d0eb3d5c6f9
_base.py
110,772
8
37
_draw_rasterized
https://github.com/matplotlib/matplotlib.git
DOC: add docstring to too-clever helper function
7
0
24,287
6
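The code field above keeps only the signature; per the docstring, the body wraps the artist list in a minimal shim whose draw() carries the allow_rasterization decorator, so the mixed-mode renderer bookkeeping applies. A hedged sketch of that shape (matplotlib's actual implementation may differ in detail):

import matplotlib.artist as martist

def _draw_rasterized(figure, artists, renderer):
    class _MinimalArtist:
        def get_rasterized(self):
            return True

        def get_agg_filter(self):
            return None

        def __init__(self, figure, artists):
            self.figure = figure
            self.artists = artists

        @martist.allow_rasterization
        def draw(self, renderer):
            # Draw every wrapped artist inside a single rasterized group.
            for a in self.artists:
                a.draw(renderer)

    return _MinimalArtist(figure, artists).draw(renderer)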