column           dtype           range
complexity       int64           1 to 56
n_identifiers    int64           1 to 114
code             stringlengths   19 to 12.7k
path             stringlengths   8 to 134
n_ast_nodes      int64           12 to 2.35k
ast_errors       stringlengths   0 to 4.01k
repo             stringlengths   3 to 28
documentation    dict
n_words          int64           2 to 866
language         stringclasses   1 value
vocab_size       int64           2 to 323
commit_id        stringlengths   40 to 40
file_name        stringlengths   5 to 79
id               int64           243 to 338k
nloc             int64           1 to 228
token_counts     int64           5 to 1.4k
fun_name         stringlengths   1 to 77
url              stringlengths   31 to 60
commit_message   stringlengths   3 to 15.3k
n_whitespaces    int64           1 to 3.23k
n_ast_errors     int64           0 to 20
d_id             int64           74 to 121k
ast_levels       int64           4 to 29
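The columns above describe a per-function dataset of Python source paired with commit metadata and size metrics; the lines that follow are individual records in that schema. As a minimal sketch of how rows with these columns could be iterated, assuming the Hugging Face `datasets` library and a placeholder dataset path (the actual dataset name is not stated in this listing):

from datasets import load_dataset

# "your-org/your-dataset" is a placeholder; substitute the real dataset path.
ds = load_dataset("your-org/your-dataset", split="train")

# Each row carries the function source ("code") plus repo/commit metadata
# and metrics such as complexity and nloc, matching the columns listed above.
for row in ds.select(range(3)):
    print(row["repo"], row["path"], row["fun_name"])
    print("complexity:", row["complexity"], "nloc:", row["nloc"])
    print(row["code"][:200])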
9
23
def quantize(self, colors=256, method=None, kmeans=0, palette=None, dither=1):
    self.load()
    if method is None:
        # defaults:
        method = Quantize.MEDIANCUT
        if self.mode == "RGBA":
            method = Quantize.FASTOCTREE
    if self.mode == "RGBA" and method not in (
        Quantize.FASTOCTREE,
        Quantize.LIBIMAGEQUANT,
    ):
        # Caller specified an invalid mode.
        raise ValueError(
            "Fast Octree (method == 2) and libimagequant (method == 3) "
            "are the only valid methods for quantizing RGBA images"
        )
    if palette:
        # use palette from reference image
        palette.load()
        if palette.mode != "P":
            raise ValueError("bad mode for palette image")
        if self.mode != "RGB" and self.mode != "L":
            raise ValueError(
                "only RGB or L mode images can be quantized to a palette"
            )
        im = self.im.convert("P", dither, palette.im)
        new_im = self._new(im)
        new_im.palette = palette.palette.copy()
        return new_im
    im = self._new(self.im.quantize(colors, method, kmeans))
    from . import ImagePalette
    mode = im.im.getpalettemode()
    palette = im.im.getpalette(mode, mode)[: colors * len(mode)]
    im.palette = ImagePalette.ImagePalette(mode, palette)
    return im
src/PIL/Image.py
367
Pillow
{ "docstring": "\n Convert the image to 'P' mode with the specified number\n of colors.\n\n :param colors: The desired number of colors, <= 256\n :param method: :data:`Quantize.MEDIANCUT` (median cut),\n :data:`Quantize.MAXCOVERAGE` (maximum coverage),\n :data:`Quantize.FASTOCTREE` (fast octree),\n :data:`Quantize.LIBIMAGEQUANT` (libimagequant; check support\n using :py:func:`PIL.features.check_feature` with\n ``feature=\"libimagequant\"``).\n\n By default, :data:`Quantize.MEDIANCUT` will be used.\n\n The exception to this is RGBA images. :data:`Quantize.MEDIANCUT`\n and :data:`Quantize.MAXCOVERAGE` do not support RGBA images, so\n :data:`Quantize.FASTOCTREE` is used by default instead.\n :param kmeans: Integer\n :param palette: Quantize to the palette of given\n :py:class:`PIL.Image.Image`.\n :param dither: Dithering method, used when converting from\n mode \"RGB\" to \"P\" or from \"RGB\" or \"L\" to \"1\".\n Available methods are :data:`Dither.NONE` or :data:`Dither.FLOYDSTEINBERG`\n (default).\n Default: 1 (legacy setting)\n :returns: A new image\n\n ", "language": "en", "n_whitespaces": 439, "n_words": 114, "vocab_size": 85 }
145
Python
102
f8e4e9c2dd94c6f4789639dd891b8a6d5fb16e14
Image.py
242,236
32
222
quantize
https://github.com/python-pillow/Pillow.git
Added enums
514
0
69,798
12
1
18
def dummy_inputs(self) -> Dict[str, tf.Tensor]:
    VISION_DUMMY_INPUTS = tf.random.uniform(shape=(3, self.config.num_channels, 224, 224), dtype=tf.float32)
    return {"pixel_values": tf.constant(VISION_DUMMY_INPUTS)}

RESNET_START_DOCSTRING = r
RESNET_INPUTS_DOCSTRING = r

@keras_serializable
src/transformers/models/resnet/modeling_tf_resnet.py
104
@keras_serializable
transformers
{ "docstring": "\n Dummy inputs to build the network. Returns:\n `Dict[str, tf.Tensor]`: The dummy inputs.\n \n This model is a TensorFlow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular TensorFlow Module and refer to the TensorFlow documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See\n [`AutoFeatureExtractor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n", "language": "en", "n_whitespaces": 289, "n_words": 140, "vocab_size": 98 }
22
Python
19
77ea5130a1cd7de36796cc4d1bae6f21094d8863
modeling_tf_resnet.py
32,050
7
56
dummy_inputs
https://github.com/huggingface/transformers.git
Add TF ResNet model (#17427) * Rought TF conversion outline * Tidy up * Fix padding differences between layers * Add back embedder - whoops * Match test file to main * Match upstream test file * Correctly pass and assign image_size parameter Co-authored-by: Sayak Paul <[email protected]> * Add in MainLayer * Correctly name layer * Tidy up AdaptivePooler * Small tidy-up More accurate type hints and remove whitespaces * Change AdaptiveAvgPool Use the AdaptiveAvgPool implementation by @Rocketknight1, which correctly pools if the output shape does not evenly divide by input shape c.f. https://github.com/huggingface/transformers/pull/17554/files/9e26607e22aa8d069c86b50196656012ff0ce62a#r900109509 Co-authored-by: From: matt <[email protected]> Co-authored-by: Sayak Paul <[email protected]> * Use updated AdaptiveAvgPool Co-authored-by: matt <[email protected]> * Make AdaptiveAvgPool compatible with CPU * Remove image_size from configuration * Fixup * Tensorflow -> TensorFlow * Fix pt references in tests * Apply suggestions from code review - grammar and wording Co-authored-by: NielsRogge <[email protected]> Co-authored-by: NielsRogge <[email protected]> * Add TFResNet to doc tests * PR comments - GlobalAveragePooling and clearer comments * Remove unused import * Add in keepdims argument * Add num_channels check * grammar fix: by -> of Co-authored-by: matt <[email protected]> Co-authored-by: Matt <[email protected]> * Remove transposes - keep NHWC throughout forward pass * Fixup look sharp * Add missing layer names * Final tidy up - remove from_pt now weights on hub Co-authored-by: Sayak Paul <[email protected]> Co-authored-by: matt <[email protected]> Co-authored-by: NielsRogge <[email protected]> Co-authored-by: Matt <[email protected]>
40
1
5,843
12
2
14
def __reduce__(self) -> Tuple[Any, Any, Any]:
    pickled_state = super(FeatureArray, self).__reduce__()
    if isinstance(pickled_state, str):
        raise TypeError("np array __reduce__ returned string instead of tuple.")
    new_state = pickled_state[2] + (
        self.number_of_dimensions,
        self.is_sparse,
        self.units,
    )
    return pickled_state[0], pickled_state[1], new_state
rasa/utils/tensorflow/model_data.py
110
rasa
{ "docstring": "Needed in order to pickle this object.\n\n Returns:\n A tuple.\n ", "language": "en", "n_whitespaces": 35, "n_words": 10, "vocab_size": 10 }
35
Python
33
4cdceaab5271a5b51463ec562c8eb55f96b771c5
model_data.py
159,676
15
73
__reduce__
https://github.com/RasaHQ/rasa.git
Bump numpy from 1.19.5 to 1.21.6 (#11078) * Bump numpy from 1.19.5 to 1.21.6 Bumps [numpy](https://github.com/numpy/numpy) from 1.19.5 to 1.21.6. - [Release notes](https://github.com/numpy/numpy/releases) - [Changelog](https://github.com/numpy/numpy/blob/main/doc/HOWTO_RELEASE.rst.txt) - [Commits](https://github.com/numpy/numpy/compare/v1.19.5...v1.21.6) --- updated-dependencies: - dependency-name: numpy dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] <[email protected]> * fixed mypy errors for numpy 1.21.6 upgrade * removed duplicate np.array call Signed-off-by: dependabot[bot] <[email protected]> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Thomas Werkmeister <[email protected]> Co-authored-by: melindaloubser1 <[email protected]>
121
0
38,392
10
1
17
def test_key_query_cancellation(self) -> None:
    self.register_user("alice", "wonderland")
    alice_token = self.login("alice", "wonderland")
    bob = self.register_user("bob", "uncle")
    channel = make_request_with_cancellation_test(
        "test_key_query_cancellation",
        self.reactor,
        self.site,
        "POST",
        "/_matrix/client/r0/keys/query",
        {
            "device_keys": {
                # Empty list means we request keys for all bob's devices
                bob: [],
            },
        },
        token=alice_token,
    )
    self.assertEqual(200, channel.code, msg=channel.result["body"])
    self.assertIn(bob, channel.json_body["device_keys"])
tests/rest/client/test_keys.py
177
synapse
{ "docstring": "\n Tests that /keys/query is cancellable and does not swallow the\n CancelledError.\n ", "language": "en", "n_whitespaces": 33, "n_words": 11, "vocab_size": 11 }
47
Python
42
d3d9ca156e323fe194b1bcb1af1628f65a2f3c1c
test_keys.py
249,472
23
104
test_key_query_cancellation
https://github.com/matrix-org/synapse.git
Cancel the processing of key query requests when they time out. (#13680)
259
0
72,939
13
2
3
def test_normal_operation(self, ray_instance):
python/ray/serve/tests/test_standalone2.py
15
ray
{ "docstring": "Checks that a moderate timeout doesn't affect normal operation.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
3
Python
3
700618d0dbd27282ce72441d22a0000075b7a54f
test_standalone2.py
135,946
7
52
test_normal_operation
https://github.com/ray-project/ray.git
[Serve] Add the `SERVE_REQUEST_PROCESSING_TIMEOUT_S` environment variable (#29534)
10
0
30,778
6
1
8
async def test_slug(hass, caplog):
    result = slug(hass, "http://127.0.0.2/testurl/{{1/0}}")
    assert result is None
    assert "Syntax error in" in caplog.text

@respx.mock
tests/components/generic/test_config_flow.py
54
@respx.mock
core
{ "docstring": "\n Test that the slug function generates an error in case of invalid template.\n\n Other paths in the slug function are already tested by other tests.\n ", "language": "en", "n_whitespaces": 35, "n_words": 25, "vocab_size": 21 }
19
Python
17
e64336cb91d1ce97ac82c57e98477acedfcbcf71
test_config_flow.py
314,918
4
26
test_slug
https://github.com/home-assistant/core.git
Allow configuring username and password in generic camera config flow (#73804) * Add ability to use user & pw not in stream url * Increase test coverage to 100% * Increase test coverage * Verify that stream source includes user:pass * Code review: refactor test to use MockConfigEntry * Code review: Improve test docstring * Edit comment; retrigger CI. Co-authored-by: Dave T <[email protected]>
30
1
113,519
9
8
9
def _get_text_feature_max_length(config, training_set_metadata) -> int:
    max_length = 0
    for feature in config["input_features"]:
        if feature["type"] == TEXT:
            feature_max_len = training_set_metadata[feature["name"]]["word_max_sequence_length"]
            if feature_max_len > max_length:
                max_length = feature_max_len
    if (
        ("preprocessing" in config)
        and (TEXT in config["preprocessing"])
        and ("word_sequence_length_limit" in config["preprocessing"][TEXT])
    ):
        limit = config["preprocessing"][TEXT]["word_sequence_length_limit"]
    else:
        limit = 256  # Preprocessing default word_sequence_length_limit = 256
    if max_length > limit + 2:  # For start and stop symbols.
        max_length = limit + 2
    return max_length
ludwig/automl/auto_tune_config.py
190
ludwig
{ "docstring": "Returns max sequence length over text features, subject to preprocessing limit.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
72
Python
45
d77aaf8da39f04a353a3a08fb699ae8a96ffea3a
auto_tune_config.py
6,396
19
110
_get_text_feature_max_length
https://github.com/ludwig-ai/ludwig.git
Improve AutoML heuristics for text classification (#1815) * Improve AutoML heuristics for text classification Co-authored-by: Anne Holler <[email protected]>
184
0
970
14
3
8
async def wait(self) -> None:
    if self._is_set:
        return
    if not self._loop:
        self._loop = get_running_loop()
        self._event = asyncio.Event()
    await self._event.wait()
src/prefect/_internal/concurrency/primitives.py
78
prefect
{ "docstring": "\n Wait until the flag has been set.\n\n If the flag has already been set when this method is called, it returns immediately.\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 18 }
19
Python
17
a368874d1b145c1ec5201e5efd3c26ce7c1e8611
primitives.py
60,126
12
44
wait
https://github.com/PrefectHQ/prefect.git
Add thread-safe async primitives `Event` and `Future` (#7865) Co-authored-by: Serina Grill <[email protected]>
80
0
11,991
10
3
22
def generic_parser(parse_func, *cols) -> np.ndarray:
    warnings.warn(
        "Use pd.to_datetime instead.",
        FutureWarning,
        stacklevel=find_stack_level(inspect.currentframe()),
    )
    N = _check_columns(cols)
    results = np.empty(N, dtype=object)
    for i in range(N):
        args = [c[i] for c in cols]
        results[i] = parse_func(*args)
    return results
pandas/io/date_converters.py
131
pandas
{ "docstring": "\n Use dateparser to parse columns with data information into a single datetime column.\n\n .. deprecated:: 1.2\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 16 }
35
Python
29
6787b8b73f4c54a0cf742a90433e6fb6c7edb231
date_converters.py
168,932
17
83
generic_parser
https://github.com/pandas-dev/pandas.git
TST: Address/catch more test warnings (#48358)
91
0
40,344
12
1
2
def computed(self): return self["computed"]
packages/python/plotly/plotly/graph_objs/_layout.py
22
plotly.py
{ "docstring": "\n Placeholder for exporting automargin-impacting values namely\n `margin.t`, `margin.b`, `margin.l` and `margin.r` in \"full-\n json\" mode.\n\n The 'computed' property accepts values of any type\n\n Returns\n -------\n Any\n ", "language": "en", "n_whitespaces": 83, "n_words": 26, "vocab_size": 25 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_layout.py
227,377
2
11
computed
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
59,050
7
3
13
def profile(event_type, extra_data=None):
    if not PROFILING_ENABLED:
        return NULL_LOG_SPAN
    worker = ray.worker.global_worker
    if worker.mode == ray.worker.LOCAL_MODE:
        return NULL_LOG_SPAN
    return worker.core_worker.profile_event(event_type.encode("ascii"), extra_data)
python/ray/_private/profiling.py
85
ray
{ "docstring": "Profile a span of time so that it appears in the timeline visualization.\n\n Note that this only works in the raylet code path.\n\n This function can be used as follows (both on the driver or within a task).\n\n .. code-block:: python\n import ray._private.profiling as profiling\n\n with profiling.profile(\"custom event\", extra_data={'key': 'val'}):\n # Do some computation here.\n\n Optionally, a dictionary can be passed as the \"extra_data\" argument, and\n it can have keys \"name\" and \"cname\" if you want to override the default\n timeline display text and box color. Other values will appear at the bottom\n of the chrome tracing GUI when you click on the box corresponding to this\n profile span.\n\n Args:\n event_type: A string describing the type of the event.\n extra_data: This must be a dictionary mapping strings to strings. This\n data will be added to the json objects that are used to populate\n the timeline, so if you want to set a particular color, you can\n simply set the \"cname\" attribute to an appropriate color.\n Similarly, if you set the \"name\" attribute, then that will set the\n text displayed on the box in the timeline.\n\n Returns:\n An object that can profile a span of time via a \"with\" statement.\n ", "language": "en", "n_whitespaces": 333, "n_words": 199, "vocab_size": 120 }
20
Python
16
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
profiling.py
130,162
7
52
profile
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
49
0
29,129
10
9
19
def mathieu_even_coef(m, q):
    r
    if not (isscalar(m) and isscalar(q)):
        raise ValueError("m and q must be scalars.")
    if (q < 0):
        raise ValueError("q >=0")
    if (m != floor(m)) or (m < 0):
        raise ValueError("m must be an integer >=0.")
    if (q <= 1):
        qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
    else:
        qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
    km = int(qm + 0.5*m)
    if km > 251:
        warnings.warn("Too many predicted coefficients.", RuntimeWarning, 2)
    kd = 1
    m = int(floor(m))
    if m % 2:
        kd = 2
    a = mathieu_a(m, q)
    fc = _specfun.fcoef(kd, m, q, a)
    return fc[:km]
scipy/special/_basic.py
304
scipy
{ "docstring": "Fourier coefficients for even Mathieu and modified Mathieu functions.\n\n The Fourier series of the even solutions of the Mathieu differential\n equation are of the form\n\n .. math:: \\mathrm{ce}_{2n}(z, q) = \\sum_{k=0}^{\\infty} A_{(2n)}^{(2k)} \\cos 2kz\n\n .. math:: \\mathrm{ce}_{2n+1}(z, q) = \\sum_{k=0}^{\\infty} A_{(2n+1)}^{(2k+1)} \\cos (2k+1)z\n\n This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even\n input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input\n m=2n+1.\n\n Parameters\n ----------\n m : int\n Order of Mathieu functions. Must be non-negative.\n q : float (>=0)\n Parameter of Mathieu functions. Must be non-negative.\n\n Returns\n -------\n Ak : ndarray\n Even or odd Fourier coefficients, corresponding to even or odd m.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html\n .. [2] NIST Digital Library of Mathematical Functions\n https://dlmf.nist.gov/28.4#i\n\n ", "language": "en", "n_whitespaces": 238, "n_words": 128, "vocab_size": 82 }
101
Python
70
4871f3d1c61bdb296ae03e3480f5f584f5c67256
_basic.py
241,808
55
205
mathieu_even_coef
https://github.com/scipy/scipy.git
MAINT: optimize, special, signal: Use custom warnings instead of print statements (#15259) Co-authored-by: Pamphile Roy <[email protected]> Co-authored-by: Tirth Patel <[email protected]>
191
0
69,704
15
3
18
def prefetch_renditions(self, *filters):
    # Get a list of filter spec strings. The given value could contain Filter objects
    filter_specs = [
        filter.spec if isinstance(filter, Filter) else filter for filter in filters
    ]
    rendition_model = self.model.get_rendition_model()
    return self.prefetch_related(
        models.Prefetch(
            "renditions",
            queryset=rendition_model.objects.filter(filter_spec__in=filter_specs),
            to_attr="prefetched_renditions",
        )
    )
wagtail/images/models.py
109
wagtail
{ "docstring": "\n Prefetches generated renditions for the given filters.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
43
Python
39
52ace9eae7311fa708dd19a7d6b6cabfb36a8fee
models.py
77,627
12
68
prefetch_renditions
https://github.com/wagtail/wagtail.git
Add prefetch_renditions method on Image queryset manager Update logic when creating and looking for a rendtion
170
0
16,680
13
1
11
def test_dataset_shard_with_loader_fn(self):
    dset = ray.data.range(100)
    config = {"input": "dataset", "input_config": {"loader_fn": lambda: dset}}
    ret_dataset, _ = get_dataset_and_shards(config)
    assert ret_dataset.count() == dset.count()
rllib/offline/tests/test_dataset_reader.py
95
ray
{ "docstring": "Tests whether the dataset_shard function works correctly with loader_fn.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
21
Python
19
569fe0109629048d08e1d9e023f7769f10bd2244
test_dataset_reader.py
125,008
5
53
test_dataset_shard_with_loader_fn
https://github.com/ray-project/ray.git
[RLlib] improved unittests for dataset_reader and fixed bugs (#26458)
56
0
27,747
11
3
9
def _preprocess_conv3d_input(x, data_format):
    tf_data_format = "NDHWC"
    if data_format == "channels_first":
        if not _has_nchw_support():
            x = tf.compat.v1.transpose(x, (0, 2, 3, 4, 1))
        else:
            tf_data_format = "NCDHW"
    return x, tf_data_format
keras/backend.py
92
keras
{ "docstring": "Transpose and cast the input before the conv3d.\n\n Args:\n x: input tensor.\n data_format: string, `\"channels_last\"` or `\"channels_first\"`.\n\n Returns:\n A tensor.\n ", "language": "en", "n_whitespaces": 50, "n_words": 20, "vocab_size": 17 }
28
Python
23
84afc5193d38057e2e2badf9c889ea87d80d8fbf
backend.py
269,509
8
55
_preprocess_conv3d_input
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
76
0
80,140
14
1
12
async def test_get_application_credentials(hass):
    test_1_integration = _get_test_integration(hass, "test_1", True)
    test_2_integration = _get_test_integration_with_application_credentials(
        hass, "test_2"
    )
    with patch("homeassistant.loader.async_get_custom_components") as mock_get:
        mock_get.return_value = {
            "test_1": test_1_integration,
            "test_2": test_2_integration,
        }
        application_credentials = await loader.async_get_application_credentials(hass)
        assert "test_2" in application_credentials
        assert "test_1" not in application_credentials
tests/test_loader.py
118
core
{ "docstring": "Verify that custom components with application_credentials are found.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
39
Python
31
ae01ec02e28d4b83ef64636e36de2baf59c19874
test_loader.py
299,612
13
64
test_get_application_credentials
https://github.com/home-assistant/core.git
Allow custom integrations to support application_credentials platform (#71129)
118
0
98,529
11
2
12
def start_reaper_process(self):
    assert (
        not self.kernel_fate_share
    ), "a reaper should not be used with kernel fate-sharing"
    process_info = ray._private.services.start_reaper(fate_share=False)
    assert ray_constants.PROCESS_TYPE_REAPER not in self.all_processes
    if process_info is not None:
        self.all_processes[ray_constants.PROCESS_TYPE_REAPER] = [
            process_info,
        ]
python/ray/node.py
91
ray
{ "docstring": "\n Start the reaper process.\n\n This must be the first process spawned and should only be called when\n ray processes should be cleaned up if this process dies.\n ", "language": "en", "n_whitespaces": 56, "n_words": 27, "vocab_size": 22 }
34
Python
28
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
node.py
130,804
10
57
start_reaper_process
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
124
0
29,376
10
1
11
def get_best_result(self) -> Tuple[int, Module, Dict[str, Dict[str, Tensor]], float, List[Dict]]: raise NotImplementedError()
nni/compression/pytorch/base/scheduler.py
51
nni
{ "docstring": "\n Returns\n -------\n Tuple[int, Module, Dict[str, Dict[str, Tensor]], float, List[Dict]]\n Return the task result that has the best performance,\n inculde task id, the compact model, the masks on the compact model, score and config list used in this task.\n ", "language": "en", "n_whitespaces": 89, "n_words": 38, "vocab_size": 30 }
12
Python
11
d68c786ff81bad19c04619d6a999ff34aaa724e7
scheduler.py
113,578
9
36
get_best_result
https://github.com/microsoft/nni.git
[Compression] remove pruning v1 & refactor directory (#5228)
26
0
24,963
7
1
5
def register(cls, function, *call_args, **call_kwds):
modin/core/dataframe/algebra/map.py
23
modin
{ "docstring": "\n Build Map operator that will be performed across each partition.\n\n Parameters\n ----------\n function : callable(pandas.DataFrame) -> pandas.DataFrame\n Function that will be applied to the each partition.\n Function takes `pandas.DataFrame` and returns `pandas.DataFrame`\n of the same shape.\n *call_args : args\n Args that will be passed to the returned function.\n **call_kwds : kwargs\n Kwargs that will be passed to the returned function.\n\n Returns\n -------\n callable\n Function that takes query compiler and executes map function.\n ", "language": "en", "n_whitespaces": 209, "n_words": 72, "vocab_size": 44 }
5
Python
5
a6f47c8e1c27d85fc09926bb35c2f1a65a6d3e79
map.py
154,457
3
18
register
https://github.com/modin-project/modin.git
REFACTOR-#4942: remove call method in favor of register due to duplication (#4943) Signed-off-by: Myachev <[email protected]>
12
0
35,990
6
1
17
def test_product_types_query_ids_not_exists(user_api_client, category):
    query = NOT_EXISTS_IDS_COLLECTIONS_QUERY
    variables = {"filter": {"ids": ["fTEJRuFHU6fd2RU=", "2XwnQNNhwCdEjhP="]}}
    response = user_api_client.post_graphql(query, variables)
    content = get_graphql_content(response, ignore_errors=True)
    message_error = '{"ids": [{"message": "Invalid ID specified.", "code": ""}]}'
    assert len(content["errors"]) == 1
    assert content["errors"][0]["message"] == message_error
    assert content["data"]["productTypes"] is None

QUERY_FILTER_PRODUCT_TYPES =

@pytest.mark.parametrize(
    "search, expected_names",
    (
        ("", ["The best juices", "The best beers", "The worst beers"]),
        ("best", ["The best juices", "The best beers"]),
        ("worst", ["The worst beers"]),
        ("average", []),
    ),
)
saleor/graphql/product/tests/queries/test_product_types_query.py
234
@pytest.mark.parametrize( "search, expected_names", ( ("", ["The best juices", "The best beers", "The worst beers"]), ("best", ["The best juices", "The best beers"]), ("worst", ["The worst beers"]), ("average", []), ), )
saleor
{ "docstring": "\n query($filters: ProductTypeFilterInput) {\n productTypes(first: 10, filter: $filters) {\n edges {\n node {\n name\n }\n }\n }\n }\n", "language": "en", "n_whitespaces": 76, "n_words": 17, "vocab_size": 11 }
72
Python
52
d90be220d6b687d08153934a51354011a3cb5ca1
test_product_types_query.py
29,299
9
81
test_product_types_query_ids_not_exists
https://github.com/saleor/saleor.git
Split test_product.py and test_variant.py into multiple files (#11173) * Split test_product.py into multiple files * Split test_variant.py into multiple files
130
1
5,214
12
2
12
def script_args(f):
    args = [
        magic_arguments.argument(
            '--out', type=str, help=
        ),
        magic_arguments.argument(
            '--err', type=str, help=
        ),
        magic_arguments.argument(
            '--bg', action="store_true", help=
        ),
        magic_arguments.argument(
            '--proc', type=str, help=
        ),
        magic_arguments.argument(
            '--no-raise-error', action="store_false", dest='raise_error', help=
        )
    ]
    for arg in args:
        f = arg(f)
    return f

@magics_class
IPython/core/magics/script.py
174
@magics_class
ipython
{ "docstring": "single decorator for adding script argsThe variable in which to store stdout from the script.\n If the script is backgrounded, this will be the stdout *pipe*,\n instead of the stderr text itself and will not be auto closed.\n The variable in which to store stderr from the script.\n If the script is backgrounded, this will be the stderr *pipe*,\n instead of the stderr text itself and will not be autoclosed.\n Whether to run the script in the background.\n If given, the only way to see the output of the command is\n with --out/err.\n The variable in which to store Popen instance.\n This is used only when --bg option is given.\n Whether you should raise an error message in addition to\n a stream on stderr if you get a nonzero exit code.\n ", "language": "en", "n_whitespaces": 274, "n_words": 131, "vocab_size": 67 }
42
Python
27
ce62a7a4b2c97bf8a30e8074e8fc18103a0718a0
script.py
208,424
39
101
script_args
https://github.com/ipython/ipython.git
avoid deprecated get_event_loop use our own `async_helpers.get_asyncio_loop` to track the global event loop script magics use dedicated background asyncio loop instead of trying to work on the main loop, which may or may not exist _AsyncIOProxy wraps background script objects to transfer awaitables across loops only works for coroutine methods, which might be good enough? Works for read, etc.
243
1
52,322
11
3
22
def _descendants_with_perm(self, user, action):
    # Get the permission object corresponding to this action
    permission = self._get_permission_objects_for_actions([action]).first()

    # Get the collections that have a GroupCollectionPermission record
    # for this permission and any of the user's groups;
    # create a list of their paths
    collection_roots = Collection.objects.filter(
        group_permissions__group__in=user.groups.all(),
        group_permissions__permission=permission,
    ).values("path", "depth")

    if collection_roots:
        # build a filter expression that will filter our model to just those
        # instances in collections with a path that starts with one of the above
        # but excluding the collection on which permission was granted
        collection_path_filter = Q(
            path__startswith=collection_roots[0]["path"]
        ) & Q(depth__gt=collection_roots[0]["depth"])
        for collection in collection_roots[1:]:
            collection_path_filter = collection_path_filter | (
                Q(path__startswith=collection["path"])
                & Q(depth__gt=collection["depth"])
            )
        return Collection.objects.all().filter(collection_path_filter)
    else:
        # no matching collections
        return Collection.objects.none()
wagtail/core/permission_policies/collections.py
239
wagtail
{ "docstring": "\n Return a queryset of collections descended from a collection on which this user has\n a GroupCollectionPermission record for this action. Used for actions, like edit and\n delete where the user cannot modify the collection where they are granted permission.\n ", "language": "en", "n_whitespaces": 68, "n_words": 39, "vocab_size": 31 }
117
Python
78
d10f15e55806c6944827d801cd9c2d53f5da4186
collections.py
73,937
18
141
_descendants_with_perm
https://github.com/wagtail/wagtail.git
Reformat with black
391
0
16,183
18
5
20
def queryables(self) -> dict[str, Any]:
    # mypy doesn't recognize DataFrame._AXIS_NAMES, so we re-write it here
    axis_names = {0: "index", 1: "columns"}

    # compute the values_axes queryables
    d1 = [(a.cname, a) for a in self.index_axes]
    d2 = [(axis_names[axis], None) for axis, values in self.non_index_axes]
    d3 = [
        (v.cname, v) for v in self.values_axes if v.name in set(self.data_columns)
    ]
    return dict(d1 + d2 + d3)
pandas/io/pytables.py
151
pandas
{ "docstring": "return a dict of the kinds allowable columns for this object", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
63
Python
52
050b3b815604652bc445d2487f6e1fc83eaa8d1f
pytables.py
169,378
9
98
queryables
https://github.com/pandas-dev/pandas.git
TYP: Upgrade mypy to 0.981 (#48871) Co-authored-by: Matthew Roeschke <[email protected]>
137
0
40,429
12
1
5
def get_tables(self) -> HandlerResponse:
    query =
    return self.native_query(query)
mindsdb/integrations/handlers/postgres_handler/postgres_handler.py
34
mindsdb
{ "docstring": "\n List all tabels in PostgreSQL without the system tables information_schema and pg_catalog\n \n SELECT\n table_schema,\n table_name,\n table_type\n FROM\n information_schema.tables\n WHERE\n table_schema NOT IN ('information_schema', 'pg_catalog')\n and table_type in ('BASE TABLE', 'VIEW')\n ", "language": "en", "n_whitespaces": 176, "n_words": 30, "vocab_size": 27 }
8
Python
8
f105dbf028004044995817384413b4cdffd7afe2
postgres_handler.py
115,044
16
18
get_tables
https://github.com/mindsdb/mindsdb.git
handlers
30
0
25,335
7
2
7
def decode_locale_str(x):
    # type: (bytes) -> str
    return x.decode(encoding=locale.getlocale()[1] or "utf-8", errors="replace")
scapy/utils.py
51
scapy
{ "docstring": "\n Decode bytes into a string using the system locale.\n Useful on Windows where it can be unusual (e.g. cp1252)\n ", "language": "en", "n_whitespaces": 29, "n_words": 19, "vocab_size": 19 }
12
Python
12
664f5985c24c2eb7645bf76327bd333fab5f92b4
utils.py
209,976
2
28
decode_locale_str
https://github.com/secdev/scapy.git
Automata: improve memory management (#3743) * Automata memory improvements (cleanup..) * Add docstrings
21
0
52,840
12
1
2
def label0(self): return self["label0"]
packages/python/plotly/plotly/graph_objs/_funnelarea.py
22
plotly.py
{ "docstring": "\n Alternate to `labels`. Builds a numeric set of labels. Use with\n `dlabel` where `label0` is the starting label and `dlabel` the\n step.\n\n The 'label0' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n int|float\n ", "language": "en", "n_whitespaces": 107, "n_words": 41, "vocab_size": 36 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_funnelarea.py
226,852
2
11
label0
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
58,525
7
1
6
def spreadsheet(self) -> Spreadsheet: return self.client.open_by_key(self.spreadsheet_id)
airbyte-integrations/connectors/destination-google-sheets/destination_google_sheets/spreadsheet.py
33
airbyte
{ "docstring": "\n Returns pygsheets.Spreadsheet with opened target spreadsheet by key.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
6
Python
6
feb0d2f37803929a1ad0c723eea430f8cd6c201f
spreadsheet.py
5,081
5
19
spreadsheet
https://github.com/airbytehq/airbyte.git
🎉 New Destination: Implement `Destination Google Sheets` using CDK (#12135)
20
0
720
8
1
5
def require_sentencepiece(test_case): return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
src/transformers/testing_utils.py
37
transformers
{ "docstring": "\n Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.\n ", "language": "en", "n_whitespaces": 22, "n_words": 15, "vocab_size": 15 }
7
Python
7
57e6464ac9a31156f1c93e59107323e6ec01309e
testing_utils.py
37,507
2
20
require_sentencepiece
https://github.com/huggingface/transformers.git
Update all require decorators to use skipUnless when possible (#16999)
13
0
6,812
10
1
46
def test_migrate_plugin(self):
    project2 = self.create_project(
        name="hellbar", organization=self.organization, teams=[self.team]
    )
    plugin2 = JiraPlugin()
    plugin2.set_option("enabled", True, project2)
    plugin2.set_option("default_project", "BAR", project2)
    plugin2.set_option("instance_url", "https://example.atlassian.net", project2)
    group = self.create_group(message="Hello world", culprit="foo.bar")
    plugin_issue = GroupMeta.objects.create(
        key=f"{self.plugin.slug}:tid", group_id=group.id, value="SEN-1"
    )
    group2 = self.create_group(message="Hello world", culprit="foo.bar")
    plugin2_issue = GroupMeta.objects.create(
        key=f"{self.plugin.slug}:tid", group_id=group2.id, value="BAR-1"
    )
    org_integration = OrganizationIntegration.objects.get(integration_id=self.integration.id)
    org_integration.config.update({"issues_ignored_fields": ["reporter", "test"]})
    org_integration.save()

    with self.tasks():
        self.installation.migrate_issues()

    assert ExternalIssue.objects.filter(
        organization_id=self.organization.id,
        integration_id=self.integration.id,
        key=plugin_issue.value,
    ).exists()
    assert ExternalIssue.objects.filter(
        organization_id=self.organization.id,
        integration_id=self.integration.id,
        key=plugin2_issue.value,
    ).exists()
    assert not GroupMeta.objects.filter(
        key=f"{self.plugin.slug}:tid", group_id=group.id, value="SEN-1"
    ).exists()
    assert not GroupMeta.objects.filter(
        key=f"{self.plugin.slug}:tid", group_id=group.id, value="BAR-1"
    ).exists()

    oi = OrganizationIntegration.objects.get(integration_id=self.integration.id)
    assert len(oi.config["issues_ignored_fields"]) == 4
    assert self.plugin.get_option("enabled", self.project) is False
    assert plugin2.get_option("enabled", project2) is False
tests/sentry/integrations/jira/test_integration.py
636
sentry
{ "docstring": "Test that 2 projects with the Jira plugin enabled that each have an issue created\n from the plugin are migrated along with the ignored fields\n ", "language": "en", "n_whitespaces": 39, "n_words": 25, "vocab_size": 20 }
98
Python
58
f5e5a3b1ed97383e0699aff9eb0363e9eb5db479
test_integration.py
94,417
41
366
test_migrate_plugin
https://github.com/getsentry/sentry.git
feat(Jira): Plugin issue migration endpoint (#37577) * feat(jira): Plugin issue migration endpoint
433
0
19,084
15
1
14
def inspect():
    profile = prefect.context.get_profile_context()
    name, env = profile.name, profile.env
    console.out(toml.dumps({name: env}).strip())

@profile_app.command()
src/prefect/cli/profile.py
86
@profile_app.command()
prefect
{ "docstring": "\n View settings in the current profile.\n\n Use `prefect --profile <name> profile inspect` to get settings for another profile.\n ", "language": "en", "n_whitespaces": 28, "n_words": 18, "vocab_size": 16 }
13
Python
12
cb7814344ff2e34bafbd3a0c78e1c1ff41bb74c8
profile.py
53,448
4
44
inspect
https://github.com/PrefectHQ/prefect.git
Add `prefect profile set/unset/inspect/ls`
24
1
10,811
13
6
25
def downgrade():
    conn = op.get_bind()
    if conn.dialect.name == 'sqlite':
        op.execute('PRAGMA foreign_keys=OFF')
        with op.batch_alter_table('ab_view_menu', schema=None) as batch_op:
            batch_op.drop_constraint('ab_view_menu_name_uq', type_='unique')
        op.execute('PRAGMA foreign_keys=ON')
    elif conn.dialect.name == 'mysql':
        with op.batch_alter_table('ab_user', schema=None) as batch_op:
            batch_op.alter_column('email', existing_type=sa.String(256), nullable=True)
            batch_op.alter_column('username', existing_type=sa.String(256), nullable=True, unique=True)
        with op.batch_alter_table('ab_register_user', schema=None) as batch_op:
            batch_op.alter_column('email', existing_type=sa.String(256), nullable=True)
            batch_op.alter_column('username', existing_type=sa.String(256), nullable=True, unique=True)
    elif conn.dialect.name == 'mssql':
        with op.batch_alter_table('ab_register_user') as batch_op:
            # Drop the unique constraint on username and email
            constraints = get_mssql_table_constraints(conn, 'ab_register_user')
            for k, _ in constraints.get('UNIQUE').items():
                batch_op.drop_constraint(k, type_='unique')
            batch_op.alter_column('username', existing_type=sa.String(256), nullable=False, unique=True)
            batch_op.create_unique_constraint(None, ['username'])
            batch_op.alter_column('email', existing_type=sa.String(256), nullable=False, unique=True)
        with op.batch_alter_table('ab_user') as batch_op:
            # Drop the unique constraint on username and email
            constraints = get_mssql_table_constraints(conn, 'ab_user')
            for k, _ in constraints.get('UNIQUE').items():
                batch_op.drop_constraint(k, type_='unique')
            batch_op.alter_column('username', existing_type=sa.String(256), nullable=True)
            batch_op.create_unique_constraint(None, ['username'])
            batch_op.alter_column('email', existing_type=sa.String(256), nullable=True, unique=True)
            batch_op.create_unique_constraint(None, ['email'])
airflow/migrations/versions/0106_909884dea523_update_migration_for_fab_tables_to_add_missing_constraints.py
669
airflow
{ "docstring": "Unapply Update migration for FAB tables to add missing constraints", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
121
Python
55
2f5a567977e1219cab16c2548825a1b9eba07ab3
0106_909884dea523_update_migration_for_fab_tables_to_add_missing_constraints.py
46,542
30
393
downgrade
https://github.com/apache/airflow.git
Use Airflow.Base.metadata in FAB models (#22353) Since FAB models are now in airflow, it makes sense to monitor changes in them. Therefore we use Airflow.models.base.Base.metadata for FAB models
413
0
8,920
16
1
2
def post_display_hook(self) -> None:
src/textual/app.py
16
textual
{ "docstring": "Called immediately after a display is done. Used in tests.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
4
Python
4
39a764f49fff7ec3363b8ea25fce3fbf1b67ca58
app.py
185,720
2
8
post_display_hook
https://github.com/Textualize/textual.git
call later
11
0
45,129
6
1
8
def approx_standard_normal_cdf(x): return 0.5 * (1.0 + paddle.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * paddle.pow(x, 3))))
modules/image/text_to_image/disco_diffusion_cnclip_vitb16/reverse_diffusion/model/losses.py
69
PaddleHub
{ "docstring": "\n A fast approximation of the cumulative distribution function of the\n standard normal.\n ", "language": "en", "n_whitespaces": 22, "n_words": 12, "vocab_size": 10 }
17
Python
14
f4d6e64cdc132ae868699a0ba442f4ab1d304a14
losses.py
49,801
2
51
approx_standard_normal_cdf
https://github.com/PaddlePaddle/PaddleHub.git
add disco_diffusion_cnclip_vitb16 module
23
0
9,921
16
7
15
def print_help(self):
    has_portfolio_start = "" if "Delta" in self.greeks["Portfolio"] else "[unvl]"
    has_portfolio_end = "" if "Delta" in self.greeks["Portfolio"] else "[/unvl]"
    has_option_start = (
        ""
        if "Delta" in self.greeks["Option A"] or "Delta" in self.greeks["Option B"]
        else "[unvl]"
    )
    has_option_end = (
        ""
        if "Delta" in self.greeks["Option A"] or "Delta" in self.greeks["Option B"]
        else "[/unvl]"
    )
    help_text = f
    console.print(text=help_text, menu="Stocks - Options - Hedge")
openbb_terminal/stocks/options/hedge/hedge_controller.py
235
OpenBBTerminal
{ "docstring": "Print help\n[param]Ticker: [/param]{self.ticker or None}\n[param]Expiry: [/param]{self.expiration or None}\n[cmds]\n pick pick the underlying asset position\n[/cmds][param]\nUnderlying Asset Position: [/param]{self.underlying_asset_position}\n[cmds]\n list show the available strike prices for calls and puts{has_portfolio_start}\n add add an option to the list of options{has_portfolio_end}{has_option_start}\n rmv remove an option from the list of options\n sop show selected options and neutral portfolio weights\n plot show the option payoff diagram[/cmds]{has_option_end}\n ", "language": "en", "n_whitespaces": 141, "n_words": 65, "vocab_size": 46 }
63
Python
28
54a1b6f545a0016c576e9e00eef5c003d229dacf
hedge_controller.py
284,494
28
100
print_help
https://github.com/OpenBB-finance/OpenBBTerminal.git
Feature/hedge (#1768) * [Bug] Incorrect log for reddit keys. #1733 fix * Create new feature-hedge * Significantly improve code of hedge menu * More robust * Robustness * Fix tests * Fix can't multiply sequence by non-int of type 'numpy.float64' error * Temporary fix of singular matrix error. Return first feasible solution * Update Hugo Documentation * Combining menus and cleaning up code * Tidy up call_exp * Update tests Round 1 * Update tests Round 2 * Fix linting error * Fix linting? * Fixed glitch Co-authored-by: JerBouma <[email protected]> Co-authored-by: James Maslek <[email protected]> Co-authored-by: Colin Delahunty <[email protected]> Co-authored-by: colin99d <[email protected]> Co-authored-by: didierlopes.eth <[email protected]>
192
0
84,760
12
3
7
def with_attribute(*args, **attr_dict):
    <div>
    Some text
    <div type="grid">1 4 0 1 0</div>
    <div type="graph">1,3 2,3 1,1</div>
    <div>this has no type</div>
    </div>
    if args:
        attrs = args[:]
    else:
        attrs = attr_dict.items()
    attrs = [(k, v) for k, v in attrs]
pipenv/patched/notpip/_vendor/pyparsing/actions.py
71
pipenv
{ "docstring": "\n Helper to create a validating parse action to be used with start\n tags created with :class:`make_xml_tags` or\n :class:`make_html_tags`. Use ``with_attribute`` to qualify\n a starting tag with a required attribute value, to avoid false\n matches on common tags such as ``<TD>`` or ``<DIV>``.\n\n Call ``with_attribute`` with a series of attribute names and\n values. Specify the list of filter attributes names and values as:\n\n - keyword arguments, as in ``(align=\"right\")``, or\n - as an explicit dict with ``**`` operator, when an attribute\n name is also a Python reserved word, as in ``**{\"class\":\"Customer\", \"align\":\"right\"}``\n - a list of name-value tuples, as in ``((\"ns1:class\", \"Customer\"), (\"ns2:align\", \"right\"))``\n\n For attribute names with a namespace prefix, you must use the second\n form. Attribute names are matched insensitive to upper/lower case.\n\n If just testing for ``class`` (with or without a namespace), use\n :class:`with_class`.\n\n To verify that the attribute exists, but without specifying a value,\n pass ``with_attribute.ANY_VALUE`` as the value.\n\n Example::\n\n html = \n div,div_end = make_html_tags(\"div\")\n\n # only match div tag having a type attribute with value \"grid\"\n div_grid = div().set_parse_action(with_attribute(type=\"grid\"))\n grid_expr = div_grid + SkipTo(div | div_end)(\"body\")\n for grid_header in grid_expr.search_string(html):\n print(grid_header.body)\n\n # construct a match with any div tag having a type attribute, regardless of the value\n div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE))\n div_expr = div_any_type + SkipTo(div | div_end)(\"body\")\n for div_header in div_expr.search_string(html):\n print(div_header.body)\n\n prints::\n\n 1 4 0 1 0\n\n 1 4 0 1 0\n 1,3 2,3 1,1\n ", "language": "en", "n_whitespaces": 408, "n_words": 230, "vocab_size": 143 }
39
Python
34
f3166e673fe8d40277b804d35d77dcdb760fc3b3
actions.py
20,532
8
47
with_attribute
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
139
0
3,414
11
4
9
def architecture_optimizers(self):
    opts = self.optimizers()
    if isinstance(opts,list):  # pylint: disable=unsubscriptable-object
        arc_opts = opts[:self.arc_optim_count]
        if len(arc_opts) == 1:
            arc_opts = arc_opts[0]
        return arc_opts
    # If there is only 1 optimizer and it is the architecture optimizer
    if self.arc_optim_count == 1:
        return opts
    return None
nni/retiarii/oneshot/pytorch/base_lightning.py
95
nni
{ "docstring": "\n Get architecture optimizers from all optimizers. Use this to get your architecture optimizers in ``training_step``.\n\n Returns\n ----------\n opts : List[Optimizer], Optimizer, None\n Architecture optimizers defined in ``configure_architecture_optimizers``. This will be None if there is no\n architecture optimizers.\n ", "language": "en", "n_whitespaces": 95, "n_words": 37, "vocab_size": 30 }
43
Python
29
8b2eb425274cdb4537fbce4a315aec12a378d6db
base_lightning.py
111,758
10
57
architecture_optimizers
https://github.com/microsoft/nni.git
Lightning implementation for retiarii oneshot nas (#4479)
155
0
24,481
11
1
15
def test_avatar_allowed_mime_type_global(self):
    self._setup_local_files(
        {
            "good": {"mimetype": "image/png"},
            "bad": {"mimetype": "application/octet-stream"},
        }
    )

    channel = self.make_request(
        "PUT",
        f"/profile/{self.owner}/avatar_url",
        content={"avatar_url": "mxc://test/bad"},
        access_token=self.owner_tok,
    )
    self.assertEqual(channel.code, 403, channel.result)
    self.assertEqual(
        channel.json_body["errcode"], Codes.FORBIDDEN, channel.json_body
    )

    channel = self.make_request(
        "PUT",
        f"/profile/{self.owner}/avatar_url",
        content={"avatar_url": "mxc://test/good"},
        access_token=self.owner_tok,
    )
    self.assertEqual(channel.code, 200, channel.result)
tests/rest/client/test_profile.py
228
synapse
{ "docstring": "Tests that the MIME type whitelist for avatars is enforced when updating a\n global profile.\n ", "language": "en", "n_whitespaces": 29, "n_words": 15, "vocab_size": 15 }
41
Python
28
bf60da1a60096fac5fb778b732ff2214862ac808
test_profile.py
246,132
24
128
test_avatar_allowed_mime_type_global
https://github.com/matrix-org/synapse.git
Configurable limits on avatars (#11846) Only allow files which file size and content types match configured limits to be set as avatar. Most of the inspiration from the non-test code comes from matrix-org/synapse-dinsic#19
269
0
71,033
12
3
7
def _distribution_strategy_scope(self):
    if self._distribution_strategy and not tf.distribute.has_strategy():
        with self._distribution_strategy.scope():
            yield self._distribution_strategy.scope()
    else:
        yield
keras/optimizers/optimizer_v2/optimizer_v2.py
74
keras
{ "docstring": "Returns the `tf.distribute.Strategy` this optimizer was created under.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
13
Python
12
84afc5193d38057e2e2badf9c889ea87d80d8fbf
optimizer_v2.py
275,531
6
40
_distribution_strategy_scope
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
71
0
81,420
13
31
4
def classify_sysode(eq, funcs=None, **kwargs):
    r
    # Sympify equations and convert iterables of equations into
    # a list of equations
sympy/solvers/ode/ode.py
24
sympy
{ "docstring": "\n Returns a dictionary of parameter names and values that define the system\n of ordinary differential equations in ``eq``.\n The parameters are further used in\n :py:meth:`~sympy.solvers.ode.dsolve` for solving that system.\n\n Some parameter names and values are:\n\n 'is_linear' (boolean), which tells whether the given system is linear.\n Note that \"linear\" here refers to the operator: terms such as ``x*diff(x,t)`` are\n nonlinear, whereas terms like ``sin(t)*diff(x,t)`` are still linear operators.\n\n 'func' (list) contains the :py:class:`~sympy.core.function.Function`s that\n appear with a derivative in the ODE, i.e. those that we are trying to solve\n the ODE for.\n\n 'order' (dict) with the maximum derivative for each element of the 'func'\n parameter.\n\n 'func_coeff' (dict or Matrix) with the coefficient for each triple ``(equation number,\n function, order)```. The coefficients are those subexpressions that do not\n appear in 'func', and hence can be considered constant for purposes of ODE\n solving. The value of this parameter can also be a Matrix if the system of ODEs are\n linear first order of the form X' = AX where X is the vector of dependent variables.\n Here, this function returns the coefficient matrix A.\n\n 'eq' (list) with the equations from ``eq``, sympified and transformed into\n expressions (we are solving for these expressions to be zero).\n\n 'no_of_equations' (int) is the number of equations (same as ``len(eq)``).\n\n 'type_of_equation' (string) is an internal classification of the type of\n ODE.\n\n 'is_constant' (boolean), which tells if the system of ODEs is constant coefficient\n or not. This key is temporary addition for now and is in the match dict only when\n the system of ODEs is linear first order constant coefficient homogeneous. So, this\n key's value is True for now if it is available else it does not exist.\n\n 'is_homogeneous' (boolean), which tells if the system of ODEs is homogeneous. Like the\n key 'is_constant', this key is a temporary addition and it is True since this key value\n is available only when the system is linear first order constant coefficient homogeneous.\n\n References\n ==========\n -http://eqworld.ipmnet.ru/en/solutions/sysode/sode-toc1.htm\n -A. D. Polyanin and A. V. 
Manzhirov, Handbook of Mathematics for Engineers and Scientists\n\n Examples\n ========\n\n >>> from sympy import Function, Eq, symbols, diff\n >>> from sympy.solvers.ode.ode import classify_sysode\n >>> from sympy.abc import t\n >>> f, x, y = symbols('f, x, y', cls=Function)\n >>> k, l, m, n = symbols('k, l, m, n', Integer=True)\n >>> x1 = diff(x(t), t) ; y1 = diff(y(t), t)\n >>> x2 = diff(x(t), t, t) ; y2 = diff(y(t), t, t)\n >>> eq = (Eq(x1, 12*x(t) - 6*y(t)), Eq(y1, 11*x(t) + 3*y(t)))\n >>> classify_sysode(eq)\n {'eq': [-12*x(t) + 6*y(t) + Derivative(x(t), t), -11*x(t) - 3*y(t) + Derivative(y(t), t)], 'func': [x(t), y(t)],\n 'func_coeff': {(0, x(t), 0): -12, (0, x(t), 1): 1, (0, y(t), 0): 6, (0, y(t), 1): 0, (1, x(t), 0): -11, (1, x(t), 1): 0, (1, y(t), 0): -3, (1, y(t), 1): 1}, 'is_linear': True, 'no_of_equation': 2, 'order': {x(t): 1, y(t): 1}, 'type_of_equation': None}\n >>> eq = (Eq(diff(x(t),t), 5*t*x(t) + t**2*y(t) + 2), Eq(diff(y(t),t), -t**2*x(t) + 5*t*y(t)))\n >>> classify_sysode(eq)\n {'eq': [-t**2*y(t) - 5*t*x(t) + Derivative(x(t), t) - 2, t**2*x(t) - 5*t*y(t) + Derivative(y(t), t)],\n 'func': [x(t), y(t)], 'func_coeff': {(0, x(t), 0): -5*t, (0, x(t), 1): 1, (0, y(t), 0): -t**2, (0, y(t), 1): 0,\n (1, x(t), 0): t**2, (1, x(t), 1): 0, (1, y(t), 0): -5*t, (1, y(t), 1): 1}, 'is_linear': True, 'no_of_equation': 2,\n 'order': {x(t): 1, y(t): 1}, 'type_of_equation': None}\n\n ", "language": "en", "n_whitespaces": 723, "n_words": 551, "vocab_size": 270 }
19
Python
15
65be461082dda54c8748922f9c29a19af1279fe1
ode.py
197,362
154
559
classify_sysode
https://github.com/sympy/sympy.git
Remove abbreviations in documentation
27
0
48,505
6
1
5
def header_store_parse(self, name, value): raise NotImplementedError
python3.10.4/Lib/email/_policybase.py
20
XX-Net
{ "docstring": "Given the header name and the value provided by the application\n program, return the (name, value) that should be stored in the model.\n ", "language": "en", "n_whitespaces": 37, "n_words": 23, "vocab_size": 19 }
6
Python
6
8198943edd73a363c266633e1aa5b2a9e9c9f526
_policybase.py
223,640
2
12
header_store_parse
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
20
0
57,024
6
11
20
async def async_update(self):
    get_result = await getCmd(
        *self._request_args, ObjectType(ObjectIdentity(self._baseoid))
    )
    errindication, errstatus, errindex, restable = await get_result

    if errindication and not self._accept_errors:
        _LOGGER.error("SNMP error: %s", errindication)
    elif errstatus and not self._accept_errors:
        _LOGGER.error(
            "SNMP error: %s at %s",
            errstatus.prettyPrint(),
            errindex and restable[-1][int(errindex) - 1] or "?",
        )
    elif (errindication or errstatus) and self._accept_errors:
        self.value = self._default_value
    else:
        for resrow in restable:
            self.value = resrow[-1].prettyPrint()
homeassistant/components/snmp/sensor.py
210
core
{ "docstring": "Get the latest data from the remote SNMP capable host.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
63
Python
46
d9903c4cf985381002de8b923815b05dd24e0651
sensor.py
297,540
18
129
async_update
https://github.com/home-assistant/core.git
Bump `brother` and `pysnmplib` (#84107) * Bump brother version * Bump pysnmplib version * Update sensor platform * Update switch platform * Update tests * Bump brother Co-authored-by: J. Nick Koston <[email protected]>
245
0
96,508
16
3
10
def update_df(self, df):
    if get_current_execution() != "PandasOnRay" or (
        not isinstance(df._query_compiler._modin_frame, PandasOnRayDataframe)
    ):  # pragma: no cover
        ErrorMessage.not_implemented(
            "Batch Pipeline API is only implemented for `PandasOnRay` execution."
        )
    self.df = df
modin/experimental/batch/pipeline.py
71
modin
{ "docstring": "\n Update the dataframe to perform this pipeline on.\n\n Parameters\n ----------\n df : modin.pandas.DataFrame\n The new dataframe to perform this pipeline on.\n ", "language": "en", "n_whitespaces": 68, "n_words": 21, "vocab_size": 15 }
31
Python
31
3d4404e9d9a9b2a3327f8aee664a8e71ac1f18b8
pipeline.py
153,774
8
40
update_df
https://github.com/modin-project/modin.git
FEAT-#4412: Add Batch Pipeline API to Modin (#4452) Co-authored-by: Yaroslav Igoshev <[email protected]> Co-authored-by: Mahesh Vashishtha <[email protected]> Signed-off-by: Rehan Durrani <[email protected]>
108
0
35,598
12
1
16
def forward(self, pred3d, pred2d, inputs):
    gt_3d_joints = inputs['joints_3d']
    gt_2d_joints = inputs['joints_2d']
    has_3d_joints = inputs['has_3d_joints']
    has_2d_joints = inputs['has_2d_joints']
    loss_3d = mpjpe(pred3d, gt_3d_joints, has_3d_joints)
    loss_2d = keypoint_2d_loss(self.criterion_2dpose, pred2d, gt_2d_joints, has_2d_joints)
    return self.weight_3d * loss_3d + self.weight_2d * loss_2d
ppdet/modeling/losses/pose3d_loss.py
114
PaddleDetection
{ "docstring": "\n mpjpe: mpjpe loss between 3d joints\n keypoint_2d_loss: 2d joints loss compute by criterion_2dpose\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 11 }
36
Python
27
d4e34fe165c09db65fd00113708be1b711ac957c
pose3d_loss.py
211,430
9
72
forward
https://github.com/PaddlePaddle/PaddleDetection.git
pose3d metro modeling (#6612) * pose3d metro modeling * delete extra comments
126
0
53,094
9
3
7
def swapaxes(self, axis1, axis2, copy=True):  # noqa: PR01, RT01, D200
    axis1 = self._get_axis_number(axis1)
    axis2 = self._get_axis_number(axis2)
    if axis1 != axis2:
        return self.transpose()
    if copy:
        return self.copy()
    return self
modin/pandas/base.py
85
modin
{ "docstring": "\n Interchange axes and swap values axes appropriately.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 6 }
28
Python
23
605efa618e7994681f57b11d04d417f353ef8d50
base.py
153,562
8
52
swapaxes
https://github.com/modin-project/modin.git
DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333) Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Alexander Myskov <[email protected]>
93
0
35,443
9
1
20
def test_shortlatex(capfd, hello_world_f90, monkeypatch):
    ipath = Path(hello_world_f90)
    mname = "blah"
    monkeypatch.setattr(
        sys,
        "argv",
        f'f2py -m {mname} {ipath} --latex-doc --short-latex'.split(),
    )

    with util.switchdir(ipath.parent):
        f2pycli()
        out, _ = capfd.readouterr()
        assert "Documentation is saved to file" in out
        with Path(f"./{mname}module.tex").open() as otex:
            assert "\\documentclass" not in otex.read()
numpy/f2py/tests/test_f2py2e.py
161
numpy
{ "docstring": "Ensures that truncated documentation is written out\n\n TODO: Test to ensure this has no effect without --latex-doc\n CLI :: --latex-doc --short-latex\n ", "language": "en", "n_whitespaces": 30, "n_words": 21, "vocab_size": 20 }
44
Python
39
729ad4f92420231e2a7009b3223c6c7620b8b808
test_f2py2e.py
160,145
14
83
test_shortlatex
https://github.com/numpy/numpy.git
TST: Initialize f2py2e tests of the F2PY CLI (#20668) Increases F2PY coverage by around 15 percent. For the CLI itself it covers the major features (around 70 percent), with the exception of mostly numpy.distutils stuff. More importantly, sets the groundwork for #20056, in that passing the same testsuite should indicate feature parity.
122
0
38,517
16
2
17
def inset_axes(self, bounds, *, transform=None, zorder=5, **kwargs): if transform is None: transform = self.transAxes kwargs.setdefault('label', 'inset_axes') # This puts the rectangle into figure-relative coordinates. inset_locator = _TransformedBoundsLocator(bounds, transform) bounds = inset_locator(self, None).bounds projection_class, pkw = self.figure._process_projection_requirements( bounds, **kwargs) inset_ax = projection_class(self.figure, bounds, zorder=zorder, **pkw) # this locator lets the axes move if in data coordinates. # it gets called in `ax.apply_aspect() (of all places) inset_ax.set_axes_locator(inset_locator) self.add_child_axes(inset_ax) return inset_ax
lib/matplotlib/axes/_axes.py
165
matplotlib
{ "docstring": "\n Add a child inset Axes to this existing Axes.\n\n Warnings\n --------\n This method is experimental as of 3.0, and the API may change.\n\n Parameters\n ----------\n bounds : [x0, y0, width, height]\n Lower-left corner of inset Axes, and its width and height.\n\n transform : `.Transform`\n Defaults to `ax.transAxes`, i.e. the units of *rect* are in\n Axes-relative coordinates.\n\n projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \\\n'polar', 'rectilinear', str}, optional\n The projection type of the inset `~.axes.Axes`. *str* is the name\n of a custom projection, see `~matplotlib.projections`. The default\n None results in a 'rectilinear' projection.\n\n polar : bool, default: False\n If True, equivalent to projection='polar'.\n\n axes_class : subclass type of `~.axes.Axes`, optional\n The `.axes.Axes` subclass that is instantiated. This parameter\n is incompatible with *projection* and *polar*. See\n :ref:`axisartist_users-guide-index` for examples.\n\n zorder : number\n Defaults to 5 (same as `.Axes.legend`). Adjust higher or lower\n to change whether it is above or below data plotted on the\n parent Axes.\n\n **kwargs\n Other keyword arguments are passed on to the inset Axes class.\n\n Returns\n -------\n ax\n The created `~.axes.Axes` instance.\n\n Examples\n --------\n This example makes two inset Axes, the first is in Axes-relative\n coordinates, and the second in data-coordinates::\n\n fig, ax = plt.subplots()\n ax.plot(range(10))\n axin1 = ax.inset_axes([0.8, 0.1, 0.15, 0.15])\n axin2 = ax.inset_axes(\n [5, 7, 2.3, 2.3], transform=ax.transData)\n\n ", "language": "en", "n_whitespaces": 590, "n_words": 212, "vocab_size": 150 }
68
Python
54
80e672e0700fa8a268aed1bdefedbd0e493e91a7
_axes.py
108,111
12
103
inset_axes
https://github.com/matplotlib/matplotlib.git
enable Axes subclass creation by Axes.inset_axes
185
0
23,066
9
1
16
def test_no_access(self): member = self.create_user("[email protected]") self.create_member(user=member, organization=self.organization, role="member") self.login_as(member) self.get_error_response(self.organization.slug, status=status.HTTP_403_FORBIDDEN) admin = self.create_user("[email protected]") self.create_member(user=admin, organization=self.organization, role="admin") self.login_as(admin) self.get_success_response(self.organization.slug, status=status.HTTP_200_OK)
tests/sentry/api/endpoints/test_organization_codeowners_associations.py
167
sentry
{ "docstring": "\n Tests that users without the 'org:integrations' scope (i.e. Members) cannot access this endpoint.\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 13 }
20
Python
18
5efa5eeb57ae6ddf740256e08ce3b9ff4ec98eaa
test_organization_codeowners_associations.py
95,411
9
102
test_no_access
https://github.com/getsentry/sentry.git
feat(codeowners): Add endpoint to view code owner associations per organization (#31030) See API-2186 So the earlier version of this PR just had the endpoint return the entire serialized ProjectCodeOwners for an organization. While that works, the intention behind this feature is to read and use the associations, so sending the raw codeowners file, and timestamps are unnecessary and increase the latency with such large payloads, especially for larger orgs. @NisanthanNanthakumar suggested limiting what the endpoint returns to just what the feature will need on the frontend, and making the endpoint name a bit more specific. OrganizationCodeOwners -> OrganizationCodeOwnersAssocations. Along with this refactor, tests have been updated.
83
0
19,206
9
3
14
def predict_proba(self, X): check_is_fitted(self) y_pred = self.final_estimator_.predict_proba(self.transform(X)) if isinstance(self._label_encoder, list): # Handle the multilabel-indicator cases y_pred = np.array([preds[:, 0] for preds in y_pred]).T return y_pred
sklearn/ensemble/_stacking.py
97
scikit-learn
{ "docstring": "Predict class probabilities for `X` using the final estimator.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n probabilities : ndarray of shape (n_samples, n_classes) or \\\n list of ndarray of shape (n_output,)\n The class probabilities of the input samples.\n ", "language": "en", "n_whitespaces": 153, "n_words": 60, "vocab_size": 41 }
25
Python
22
c18460f78441f11b3e6c15c12238695fcfe3c872
_stacking.py
260,917
6
60
predict_proba
https://github.com/scikit-learn/scikit-learn.git
EHN Add multilabel classification support for `StackingClassifier` (#24146) * Add stacking multilabel functionality * Add underscore to a class attr * Remove model from base estimator in test_stacking * Remove scale in train/test split in test_stacking_classifier_multilabel * Add stack_method as a test parameter, change RandomForestClassifier to KNeighborsClassifier in test * Update Changelog * fix doc typos * predict_proba output will be concatenate this list in an array of shape n_samples, n_outputs * n_classes - 1. Update test. * Update sklearn/ensemble/_stacking.py Co-authored-by: Guillaume Lemaitre <[email protected]> * Update doc/whats_new/v1.0.rst Co-authored-by: Guillaume Lemaitre <[email protected]> * update whats_new * add passthrough test * update whats_new with current PR * Apply suggestions from code review Co-authored-by: Julien Jerphanion <[email protected]> * update tests * Apply suggestion to update comments on `concatenate` Co-authored-by: Julien Jerphanion <[email protected]> * parametrized the two tests into one * parametrized the two tests into one * strip the mysterious trailing _r * fix multilabel list scenario * add Guillaume's recommendations * add test for * some fix * split tests * fix flake8 * add suggestions * Trigger CI * remove multiclass-multioutput from comments and docstrings Co-authored-by: Nicolas <[email protected]> Co-authored-by: Nestor Navarro <[email protected]> Co-authored-by: Nestor Navarro <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]>
82
0
76,564
14
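As a rough usage sketch for the stacking predict_proba above (standard scikit-learn usage; the toy dataset and estimator choices are assumptions, not taken from the record):

    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier, StackingClassifier
    from sklearn.linear_model import LogisticRegression

    X, y = make_classification(random_state=0)
    clf = StackingClassifier(
        estimators=[("rf", RandomForestClassifier(random_state=0))],
        final_estimator=LogisticRegression(),
    ).fit(X, y)
    # Probabilities come from the final estimator applied to the stacked features.
    print(clf.predict_proba(X[:3]).shape)  # (3, 2) for this binary toy problem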
1
7
def test_worker_stdout(): script =
python/ray/tests/test_output.py
40
script = """@ray.remote
ray
{ "docstring": "\nimport ray\nimport sys\n\nray.init(num_cpus=2)\n\[email protected]", "language": "en", "n_whitespaces": 2, "n_words": 6, "vocab_size": 5 }
4
Python
4
2da2ac52ce3103ddb5192e7a161fec312dcdad53
test_output.py
129,473
22
96
test_worker_stdout
https://github.com/ray-project/ray.git
Unskipped test_worker_stdout (#21708)
7
2
28,963
6
11
34
def incidence_matrix(G, nodelist=None, edgelist=None, oriented=False, weight=None): import scipy as sp import scipy.sparse # call as sp.sparse if nodelist is None: nodelist = list(G) if edgelist is None: if G.is_multigraph(): edgelist = list(G.edges(keys=True)) else: edgelist = list(G.edges()) A = sp.sparse.lil_array((len(nodelist), len(edgelist))) node_index = {node: i for i, node in enumerate(nodelist)} for ei, e in enumerate(edgelist): (u, v) = e[:2] if u == v: continue # self loops give zero column try: ui = node_index[u] vi = node_index[v] except KeyError as err: raise nx.NetworkXError( f"node {u} or {v} in edgelist but not in nodelist" ) from err if weight is None: wt = 1 else: if G.is_multigraph(): ekey = e[2] wt = G[u][v][ekey].get(weight, 1) else: wt = G[u][v].get(weight, 1) if oriented: A[ui, ei] = -wt A[vi, ei] = wt else: A[ui, ei] = wt A[vi, ei] = wt return A.asformat("csc")
networkx/linalg/graphmatrix.py
438
networkx
{ "docstring": "Returns incidence matrix of G.\n\n The incidence matrix assigns each row to a node and each column to an edge.\n For a standard incidence matrix a 1 appears wherever a row's node is\n incident on the column's edge. For an oriented incidence matrix each\n edge is assigned an orientation (arbitrarily for undirected and aligning to\n direction for directed). A -1 appears for the source (tail) of an edge and\n 1 for the destination (head) of the edge. The elements are zero otherwise.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n nodelist : list, optional (default= all nodes in G)\n The rows are ordered according to the nodes in nodelist.\n If nodelist is None, then the ordering is produced by G.nodes().\n\n edgelist : list, optional (default= all edges in G)\n The columns are ordered according to the edges in edgelist.\n If edgelist is None, then the ordering is produced by G.edges().\n\n oriented: bool, optional (default=False)\n If True, matrix elements are +1 or -1 for the head or tail node\n respectively of each edge. If False, +1 occurs at both nodes.\n\n weight : string or None, optional (default=None)\n The edge data key used to provide each value in the matrix.\n If None, then each edge has weight 1. Edge weights, if used,\n should be positive so that the orientation can provide the sign.\n\n Returns\n -------\n A : SciPy sparse array\n The incidence matrix of G.\n\n Notes\n -----\n For MultiGraph/MultiDiGraph, the edges in edgelist should be\n (u,v,key) 3-tuples.\n\n \"Networks are the best discrete model for so many problems in\n applied mathematics\" [1]_.\n\n References\n ----------\n .. [1] Gil Strang, Network applications: A = incidence matrix,\n http://videolectures.net/mit18085f07_strang_lec03/\n ", "language": "en", "n_whitespaces": 428, "n_words": 272, "vocab_size": 141 }
138
Python
86
8a325d26aa7fdd3a72580c4720fa97f971bbefcb
graphmatrix.py
177,330
38
275
incidence_matrix
https://github.com/networkx/networkx.git
Use scipy.sparse array datastructure (#6037) * Use scipy.sparse array datastructure * Add reminder to rm wrapper when scipy adds creation fns. * Rm mention of np matrix from code comment. * Update networkx/algorithms/bipartite/matrix.py Co-authored-by: Stefan van der Walt <[email protected]> Co-authored-by: Ross Barnowski <[email protected]> Co-authored-by: Stefan van der Walt <[email protected]>
458
0
42,349
18
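A short sketch of calling the incidence_matrix function documented above (standard NetworkX usage; the example graph is an arbitrary choice):

    import networkx as nx

    G = nx.path_graph(3)  # edges: (0, 1), (1, 2)
    A = nx.incidence_matrix(G, oriented=True)
    # With oriented=True each column holds -1 at one endpoint and +1 at the other
    # (the orientation is arbitrary for undirected graphs).
    print(A.toarray())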
1
6
def save_or_write_to_kafka(self): from sentry.region_to_control.producer import produce_audit_log_entry produce_audit_log_entry(self)
src/sentry/models/auditlogentry.py
31
sentry
{ "docstring": "\n Region Silos do not have access to the AuditLogEntry table which is specific to the control silo.\n For those silos, this method publishes the attempted audit log write to a durable kafka queue synchronously\n that will eventually be consumed by the control silo. For the control silo, this method ultimately results\n in a save() call.\n\n This method is most ideal for shared code paths that may be invoked from either control or region silos,\n but is not recommended on code paths that should always be invoked from the control silo and depend on the\n synchronous database access.\n ", "language": "en", "n_whitespaces": 155, "n_words": 97, "vocab_size": 66 }
7
Python
7
941184cd24186324fd9f7f304b7f713041834726
auditlogentry.py
86,876
3
18
save_or_write_to_kafka
https://github.com/getsentry/sentry.git
chore(hybrid-cloud): AuditLogEntry is a control silo model now (#39890) In the control silo, creating an audit log entry writes to the db directly, whilst in region silo mode creating an audit log entry will instead push to a new kafka producer that consumes into the control silo asynchronously.
28
0
18,179
7
7
24
def open_metadata(self, book, custom_columns): if config.config_use_google_drive: if not gdriveutils.is_gdrive_ready(): raise Exception('Google Drive is configured but not ready') web_content_link = gdriveutils.get_metadata_backup_via_gdrive(book.path) if not web_content_link: raise Exception('Google Drive cover url not found') stream = None try: stream = urlopen(web_content_link) except Exception as ex: # Bubble exception to calling function self.log.debug('Error reading metadata.opf: ' + str(ex)) # ToDo Check whats going on raise ex finally: if stream is not None: stream.close() else: # ToDo: Handle book folder not found or not readable book_metadata_filepath = os.path.join(config.config_calibre_dir, book.path, 'metadata.opf') #if not os.path.isfile(book_metadata_filepath): self.create_new_metadata_backup(book, custom_columns, book_metadata_filepath) # else:
cps/tasks/metadata_backup.py
209
calibre-web
{ "docstring": "namespaces = {'dc': PURL_NAMESPACE, 'opf': OPF_NAMESPACE}\n test = etree.parse(book_metadata_filepath)\n root = test.getroot()\n for i in root.iter():\n self.log.info(i)\n title = root.find(\"dc:metadata\", namespaces)\n pass\n with open(book_metadata_filepath, \"rb\") as f:\n xml = f.read()\n\n root = objectify.fromstring(xml)\n # root.metadata['{http://purl.org/dc/elements/1.1/}title']\n # root.metadata[PURL + 'title']\n # getattr(root.metadata, PURL +'title')\n # test = objectify.parse()\n pass\n # backup not found has to be created\n #raise Exception('Book cover file not found')", "language": "en", "n_whitespaces": 245, "n_words": 62, "vocab_size": 48 }
92
Python
68
26be5ee2372b08c2f906661283a12e84d6c181f8
metadata_backup.py
173,483
37
121
open_metadata
https://github.com/janeczku/calibre-web.git
Backup metadata 3rd step
380
0
40,848
15
1
5
def proxyauth(self, user): name = 'PROXYAUTH' return self._simple_command('PROXYAUTH', user)
python3.10.4/Lib/imaplib.py
37
XX-Net
{ "docstring": "Assume authentication as \"user\".\n\n Allows an authorised administrator to proxy into any user's\n mailbox.\n\n (typ, [data]) = <instance>.proxyauth(user)\n ", "language": "en", "n_whitespaces": 46, "n_words": 18, "vocab_size": 18 }
9
Python
9
8198943edd73a363c266633e1aa5b2a9e9c9f526
imaplib.py
217,908
3
20
proxyauth
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
30
0
55,006
8
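A hedged sketch of how proxyauth is typically used (the host, the credentials, and the assumption that the server grants this account PROXYAUTH rights are all made up for illustration):

    import imaplib

    with imaplib.IMAP4_SSL("imap.example.com") as conn:
        conn.login("admin", "admin-password")    # must be an authorised administrator
        typ, data = conn.proxyauth("someuser")   # act on someuser's mailbox
        print(typ)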
11
27
def copy_m2m_relationships(obj1, obj2, fields, kwargs=None): for field_name in fields: if hasattr(obj1, field_name): try: field_obj = obj1._meta.get_field(field_name) except FieldDoesNotExist: continue if isinstance(field_obj, ManyToManyField): # Many to Many can be specified as field_name src_field_value = getattr(obj1, field_name) if kwargs and field_name in kwargs: override_field_val = kwargs[field_name] # TODO: Should we spike this our or just put the for loop inside the next if and make everything respect order? if field_name == 'instance_groups': # instance_groups are a list but we need to preserve the order for ig_id in override_field_val: getattr(obj2, field_name).add(ig_id) continue if isinstance(override_field_val, (set, list, QuerySet)): getattr(obj2, field_name).add(*override_field_val) continue if override_field_val.__class__.__name__ == 'ManyRelatedManager': src_field_value = override_field_val dest_field = getattr(obj2, field_name) dest_field.add(*list(src_field_value.all().values_list('id', flat=True)))
awx/main/utils/common.py
263
awx
{ "docstring": "\n In-place operation.\n Given two saved objects, copies related objects from obj1\n to obj2 to field of same name, if field occurs in `fields`\n ", "language": "en", "n_whitespaces": 36, "n_words": 23, "vocab_size": 21 }
110
Python
77
33c0fb79d66f56374d7c042ba79887faa85e2885
common.py
81,766
22
164
copy_m2m_relationships
https://github.com/ansible/awx.git
JT param everything (#12646) * Making almost all fields promptable on job templates and config models * Adding EE, IG and label access checks * Changing jobs preferred instance group function to handle the new IG cache field * Adding new ask fields to job template modules * Address unit/functional tests * Adding migration file
521
0
17,256
21
1
21
def keypoint_rotate(keypoint, angle, rows, cols, **params): center = (cols - 1) * 0.5, (rows - 1) * 0.5 matrix = cv2.getRotationMatrix2D(center, angle, 1.0) x, y, a, s = keypoint[:4] x, y = cv2.transform(np.array([[[x, y]]]), matrix).squeeze() return x, y, a + math.radians(angle), s @preserve_channel_dim
albumentations/augmentations/geometric/functional.py
153
@preserve_channel_dim
albumentations
{ "docstring": "Rotate a keypoint by angle.\n\n Args:\n keypoint (tuple): A keypoint `(x, y, angle, scale)`.\n angle (float): Rotation angle.\n rows (int): Image height.\n cols (int): Image width.\n\n Returns:\n tuple: A keypoint `(x, y, angle, scale)`.\n\n ", "language": "en", "n_whitespaces": 78, "n_words": 34, "vocab_size": 23 }
43
Python
32
557b7b44b393d0701413ed8012a920a0691e06cb
functional.py
225,637
6
107
keypoint_rotate
https://github.com/albumentations-team/albumentations.git
Fix Affine wrong rotation angle (#1091) * Fix Affine wrong rotation angle * Link to issue * Fix Perspective rot. angle for keypoints, fix Affine * Change angle sign, do not change it manually after all changes * Tests * Fix tests and image center * Fix shift_rotate tests Co-authored-by: Eugene Khvedchenya <[email protected]> Co-authored-by: Vladimir Iglovikov <[email protected]>
60
1
57,482
15
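A small sketch of the keypoint_rotate helper above (the import path is taken from the record's path field, the keypoint values are made up, and the function location may differ in other albumentations versions):

    import math
    from albumentations.augmentations.geometric.functional import keypoint_rotate

    kp = (10.0, 20.0, 0.0, 1.0)  # (x, y, angle, scale)
    x, y, a, s = keypoint_rotate(kp, angle=90, rows=100, cols=100)
    print(x, y, math.degrees(a))  # rotated position; the keypoint angle is now 90 degrees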
1
7
def disable_memoization() -> None: ParserElement.reset_cache() ParserElement._left_recursion_enabled = False ParserElement._packratEnabled = False ParserElement._parse = ParserElement._parseNoCache
pipenv/patched/notpip/_vendor/pyparsing/core.py
51
pipenv
{ "docstring": "\n Disables active Packrat or Left Recursion parsing and their memoization\n\n This method also works if neither Packrat nor Left Recursion are enabled.\n This makes it safe to call before activating Packrat nor Left Recursion\n to clear any previous settings.\n ", "language": "en", "n_whitespaces": 75, "n_words": 39, "vocab_size": 30 }
14
Python
11
f3166e673fe8d40277b804d35d77dcdb760fc3b3
core.py
20,579
12
29
disable_memoization
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
49
0
3,434
7
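In practice this is paired with the packrat/left-recursion switches on ParserElement; a minimal sketch, assuming the public pyparsing package rather than pipenv's vendored copy:

    import pyparsing as pp

    pp.ParserElement.enable_packrat()       # turn memoization on
    pp.ParserElement.disable_memoization()  # reset the cache and switch both modes off again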
3
14
def scale(image, factor, resample=Image.Resampling.BICUBIC): if factor == 1: return image.copy() elif factor <= 0: raise ValueError("the factor must be greater than 0") else: size = (round(factor * image.width), round(factor * image.height)) return image.resize(size, resample)
src/PIL/ImageOps.py
111
Pillow
{ "docstring": "\n Returns a rescaled image by a specific factor given in parameter.\n A factor greater than 1 expands the image, between 0 and 1 contracts the\n image.\n\n :param image: The image to rescale.\n :param factor: The expansion factor, as a float.\n :param resample: Resampling method to use. Default is\n :py:attr:`PIL.Image.BICUBIC`. See :ref:`concept-filters`.\n :returns: An :py:class:`~PIL.Image.Image` object.\n ", "language": "en", "n_whitespaces": 100, "n_words": 55, "vocab_size": 45 }
34
Python
30
f8e4e9c2dd94c6f4789639dd891b8a6d5fb16e14
ImageOps.py
242,247
8
69
scale
https://github.com/python-pillow/Pillow.git
Added enums
74
0
69,806
14
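A quick sketch of the ImageOps.scale helper above (the image size and factors are arbitrary choices):

    from PIL import Image, ImageOps

    im = Image.new("RGB", (100, 80))
    print(ImageOps.scale(im, 2.0).size)   # (200, 160)
    print(ImageOps.scale(im, 0.5).size)   # (50, 40)
    # factor == 1 returns a copy; factor <= 0 raises ValueError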
4
12
def get_all(self, name, failobj=None): values = [] name = name.lower() for k, v in self._headers: if k.lower() == name: values.append(self.policy.header_fetch_parse(k, v)) if not values: return failobj return values
python3.10.4/Lib/email/message.py
103
XX-Net
{ "docstring": "Return a list of all the values for the named field.\n\n These will be sorted in the order they appeared in the original\n message, and may contain duplicates. Any fields deleted and\n re-inserted are always appended to the header list.\n\n If no such fields exist, failobj is returned (defaults to None).\n ", "language": "en", "n_whitespaces": 87, "n_words": 51, "vocab_size": 43 }
28
Python
24
8198943edd73a363c266633e1aa5b2a9e9c9f526
message.py
223,791
9
64
get_all
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
107
0
57,068
14
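A quick sketch of Message.get_all from the record above (the header names and values are made up; the legacy Message class is used because it allows duplicate headers unconditionally):

    from email.message import Message

    msg = Message()
    msg["Received"] = "from hop1"
    msg["Received"] = "from hop2"
    print(msg.get_all("received"))        # ['from hop1', 'from hop2'], lookup is case-insensitive
    print(msg.get_all("X-Missing", []))   # failobj is returned when the header is absent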
6
15
def get_error_message(subscriber, num=1e6, error_type=None, timeout=20): deadline = time.time() + timeout msgs = [] while time.time() < deadline and len(msgs) < num: _, error_data = subscriber.poll(timeout=deadline - time.time()) if not error_data: # Timed out before any data is received. break if error_type is None or error_type == error_data.type: msgs.append(error_data) else: time.sleep(0.01) return msgs
python/ray/_private/test_utils.py
159
ray
{ "docstring": "Gets errors from GCS subscriber.\n\n Returns maximum `num` error strings within `timeout`.\n Only returns errors of `error_type` if specified.\n ", "language": "en", "n_whitespaces": 28, "n_words": 19, "vocab_size": 18 }
52
Python
43
391901f86bc0bec6d3199ac05f316a05bcc4b910
test_utils.py
146,747
12
99
get_error_message
https://github.com/ray-project/ray.git
[Remove Redis Pubsub 2/n] clean up remaining Redis references in gcs_utils.py (#23233) Continue to clean up Redis and other related Redis references, for - gcs_utils.py - log_monitor.py - `publish_error_to_driver()`
139
0
33,771
14
1
3
def socket(self): return self.sock # Utility methods
python3.10.4/Lib/imaplib.py
20
XX-Net
{ "docstring": "Return socket instance used to connect to IMAP4 server.\n\n socket = <instance>.socket()\n ", "language": "en", "n_whitespaces": 26, "n_words": 12, "vocab_size": 10 }
7
Python
7
8198943edd73a363c266633e1aa5b2a9e9c9f526
imaplib.py
217,955
2
10
socket
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
30
0
55,035
6
2
6
def path_to_string(path): if isinstance(path, os.PathLike): return os.fspath(path) return path
keras/utils/io_utils.py
42
keras
{ "docstring": "Convert `PathLike` objects to their string representation.\n\n If given a non-string typed path object, converts it to its string\n representation.\n\n If the object passed to `path` is not among the above, then it is\n returned unchanged. This allows e.g. passthrough of file objects\n through this function.\n\n Args:\n path: `PathLike` object that represents a path\n\n Returns:\n A string representation of the path argument, if Python support exists.\n ", "language": "en", "n_whitespaces": 100, "n_words": 66, "vocab_size": 49 }
9
Python
8
84afc5193d38057e2e2badf9c889ea87d80d8fbf
io_utils.py
276,920
4
25
path_to_string
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
25
0
81,776
9
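The helper above is small enough to sketch standalone; the snippet below mirrors its behaviour without importing Keras internals (the example paths are arbitrary):

    import os
    import pathlib

    def path_to_string(path):
        # PathLike objects become plain strings; anything else passes through unchanged
        if isinstance(path, os.PathLike):
            return os.fspath(path)
        return path

    print(path_to_string(pathlib.PurePosixPath("logs") / "run1"))  # 'logs/run1'
    print(path_to_string(42))                                      # 42, unchanged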
3
5
def creatable_subpage_models(cls): return [ page_model for page_model in cls.allowed_subpage_models() if page_model.is_creatable ]
wagtail/core/models/__init__.py
37
wagtail
{ "docstring": "\n Returns the list of page types that may be created under this page type,\n as a list of model classes\n ", "language": "en", "n_whitespaces": 42, "n_words": 20, "vocab_size": 17 }
12
Python
11
d10f15e55806c6944827d801cd9c2d53f5da4186
__init__.py
73,787
6
22
creatable_subpage_models
https://github.com/wagtail/wagtail.git
Reformat with black
66
0
16,110
9
1
9
def set_full_path(self) -> None: self.full_path = Path( self.config["user_data_dir"] / "models" / f"{self.freqai_info['identifier']}" ) self.full_path.mkdir(parents=True, exist_ok=True)
freqtrade/freqai/freqai_interface.py
82
freqtrade
{ "docstring": "\n Creates and sets the full path for the identifier\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 8 }
15
Python
14
5ee3b8cbbb89c8a57cb42cc3253001e47720991b
freqai_interface.py
151,540
8
40
set_full_path
https://github.com/freqtrade/freqtrade.git
update config recording to use all configs, fix tests
54
0
35,040
13
2
9
def get_template(self, template_name): template, origin = self.find_template(template_name) if not hasattr(template, "render"): # template needs to be compiled template = Template(template, origin, template_name, engine=self) return template
django/template/engine.py
70
django
{ "docstring": "\n Return a compiled Template object for the given template name,\n handling template inheritance recursively.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
25
Python
22
9c19aff7c7561e3a82978a272ecdaad40dda5c00
engine.py
206,272
5
43
get_template
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
75
0
51,459
11
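A hedged sketch of the standalone Engine.get_template call documented above (it assumes a templates/hello.html file exists on disk; the directory name and the rendering context are made up):

    from django.template import Context, Engine

    engine = Engine(dirs=["templates"])       # standalone engine, no settings module required
    tpl = engine.get_template("hello.html")   # compiled Template, resolved via find_template()
    print(tpl.render(Context({"name": "world"})))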
2
31
def get_roi_rel_points_test(self, mask_pred, pred_label, cfg): num_points = cfg.subdivision_num_points uncertainty_map = get_uncertainty(mask_pred, pred_label) num_rois, _, mask_height, mask_width = uncertainty_map.shape # During ONNX exporting, the type of each elements of 'shape' is # `Tensor(float)`, while it is `float` during PyTorch inference. if isinstance(mask_height, torch.Tensor): h_step = 1.0 / mask_height.float() w_step = 1.0 / mask_width.float() else: h_step = 1.0 / mask_height w_step = 1.0 / mask_width # cast to int to avoid dynamic K for TopK op in ONNX mask_size = int(mask_height * mask_width) uncertainty_map = uncertainty_map.view(num_rois, mask_size) num_points = min(mask_size, num_points) point_indices = uncertainty_map.topk(num_points, dim=1)[1] xs = w_step / 2.0 + (point_indices % mask_width).float() * w_step ys = h_step / 2.0 + (point_indices // mask_width).float() * h_step point_coords = torch.stack([xs, ys], dim=2) return point_indices, point_coords
mmdet/models/roi_heads/mask_heads/mask_point_head.py
267
mmdetection
{ "docstring": "Get ``num_points`` most uncertain points during test.\n\n Args:\n mask_pred (Tensor): A tensor of shape (num_rois, num_classes,\n mask_height, mask_width) for class-specific or class-agnostic\n prediction.\n pred_label (list): The predication class for each instance.\n cfg (dict): Testing config of point head.\n\n Returns:\n point_indices (Tensor): A tensor of shape (num_rois, num_points)\n that contains indices from [0, mask_height x mask_width) of the\n most uncertain points.\n point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)\n that contains [0, 1] x [0, 1] normalized coordinates of the\n most uncertain points from the [mask_height, mask_width] grid .\n ", "language": "en", "n_whitespaces": 255, "n_words": 89, "vocab_size": 58 }
123
Python
80
7d1ce22e3328ba89c11b6cdaafff6c96d9da3f4f
mask_point_head.py
244,149
18
175
get_roi_rel_points_test
https://github.com/open-mmlab/mmdetection.git
Fix `pointrend` missing `get_uncertainty` function bug (#7550) * [Fix] Adjust the order of get_classes and FileClient. (#7276) * delete -sv (#7277) Co-authored-by: Wenwei Zhang <[email protected]> * [Docs] Add Chinese version of finetune (#7178) * [Fix] Fix wrong img name in onnx2tensorrt.py (#7157) * [Docs] fix albumentations installed way (#7143) * Update finetune.md Translate the finetune.md doc to Chinese * Update finetune.md * Update finetune.md * Update finetune.md * fix lint * fx lint * fix pr Co-authored-by: Jamie <[email protected]> Co-authored-by: BigDong <[email protected]> * set unmap_results=True in ssd_head (#7328) * Update YOLOX log for non square input (#7235) * [Enhance] add cpu_num in cocopanoptic for pq computing (#7315) * add cpu_num in cocopanoptic for pq computing * cpu_num -> nproc * move nproc to evaluate * [Enhancement] Allow to set channel_order in LoadImageFromFile (#7258) * allow to set channel_order when loading images * fix lint * fix unit test * fix lint * [Fix] Force the inputs of `get_bboxes` in yolox_head to float32. (#7324) * Fix softnms bug * Add force_fp32 in corner_head and centripetal_head * [Fix] Fix typo in FPN neck (#7347) * update readme and pretrained related (#7301) * [Docs] Add Chinese version of onnx2tensorrt.md (#7219) * Fix bug of docs * translate onnx2tensorrt.md * fix * fix end-of-file-fixer * fix some bugs * 修复链接跳转 * 修复链接跳转 * 修复链接跳转-测试1 * 修复链接跳转-测试2 * 修复链接跳转-测试2 * 修复链接跳转-测试3 * 修复链接跳转-测试5 * Fix Co-authored-by: jbwang1997 <[email protected]> * Update useful_tools.md (#7180) * [Enhancement]: Update colab tutorials (#7310) * update colab tutorials * update * fix * fix wrong CUDA explaination * resolve comments * resolve comments * fix typo Co-authored-by: Cedric Luo <[email protected]> Co-authored-by: tripleMu <[email protected]> Co-authored-by: jbwang1997 <[email protected]> Co-authored-by: kira <[email protected]> Co-authored-by: Wenwei Zhang <[email protected]> * Fix pointrend missing get_uncertainty function bug Co-authored-by: Wencheng Wu <[email protected]> Co-authored-by: Yue Zhou <[email protected]> Co-authored-by: Wenwei Zhang <[email protected]> Co-authored-by: MingJian.L <[email protected]> Co-authored-by: Jamie <[email protected]> Co-authored-by: BigDong <[email protected]> Co-authored-by: Cedric Luo <[email protected]> Co-authored-by: Yosuke Shinya <[email protected]> Co-authored-by: Cedric Luo <[email protected]> Co-authored-by: Jingwei Zhang <[email protected]> Co-authored-by: jbwang1997 <[email protected]> Co-authored-by: Xiangxu-0103 <[email protected]> Co-authored-by: tripleMu <[email protected]> Co-authored-by: kira <[email protected]>
286
0
70,261
12
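The core of the routine above is converting flat top-k indices on an H x W uncertainty map into normalized (x, y) coordinates; below is a standalone PyTorch sketch of just that step (the shapes and values are made up, and this is not the mmdet API itself):

    import torch

    H, W, k = 4, 6, 3
    uncertainty = torch.rand(1, H * W)                  # one ROI, flattened uncertainty map
    idx = uncertainty.topk(k, dim=1)[1]                 # indices of the most uncertain points
    xs = 1.0 / (2 * W) + (idx % W).float() * (1.0 / W)  # column index -> x in [0, 1]
    ys = 1.0 / (2 * H) + torch.div(idx, W, rounding_mode="floor").float() * (1.0 / H)
    coords = torch.stack([xs, ys], dim=2)               # shape (1, k, 2)
    print(coords)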
1
2
async def logger_test_deployment(orion_client):
tests/test_logging.py
14
prefect
{ "docstring": "\n A deployment with a flow that returns information about the given loggers\n ", "language": "en", "n_whitespaces": 19, "n_words": 12, "vocab_size": 12 }
3
Python
3
b110baccdbfde300f410b069c873e8b2a2c98e00
test_logging.py
53,100
11
53
logger_test_deployment
https://github.com/PrefectHQ/prefect.git
Add test
6
0
10,717
6
1
2
def pointcloud(self): return self["pointcloud"]
packages/python/plotly/plotly/graph_objs/layout/template/_data.py
22
plotly.py
{ "docstring": "\n The 'pointcloud' property is a tuple of instances of\n Pointcloud that may be specified as:\n - A list or tuple of instances of plotly.graph_objs.layout.template.data.Pointcloud\n - A list or tuple of dicts of string/value properties that\n will be passed to the Pointcloud constructor\n\n Supported dict properties:\n\n Returns\n -------\n tuple[plotly.graph_objs.layout.template.data.Pointcloud]\n ", "language": "en", "n_whitespaces": 131, "n_words": 48, "vocab_size": 33 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_data.py
232,547
2
11
pointcloud
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
63,991
7
5
25
def varOr(population, toolbox, lambda_, cxpb, mutpb): offspring = [] for _ in range(lambda_): op_choice = np.random.random() if op_choice < cxpb: # Apply crossover ind1, ind2 = pick_two_individuals_eligible_for_crossover(population) if ind1 is not None: ind1, _ = toolbox.mate(ind1, ind2) del ind1.fitness.values else: # If there is no pair eligible for crossover, we still want to # create diversity in the population, and do so by mutation instead. ind1 = mutate_random_individual(population, toolbox) offspring.append(ind1) elif op_choice < cxpb + mutpb: # Apply mutation ind = mutate_random_individual(population, toolbox) offspring.append(ind) else: # Apply reproduction idx = np.random.randint(0, len(population)) offspring.append(toolbox.clone(population[idx])) return offspring
tpot/gp_deap.py
228
tpot
{ "docstring": "Part of an evolutionary algorithm applying only the variation part\n (crossover, mutation **or** reproduction). The modified individuals have\n their fitness invalidated. The individuals are cloned so returned\n population is independent of the input population.\n :param population: A list of individuals to vary.\n :param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution\n operators.\n :param lambda\\_: The number of children to produce\n :param cxpb: The probability of mating two individuals.\n :param mutpb: The probability of mutating an individual.\n :returns: The final population\n :returns: A class:`~deap.tools.Logbook` with the statistics of the\n evolution\n The variation goes as follow. On each of the *lambda_* iteration, it\n selects one of the three operations; crossover, mutation or reproduction.\n In the case of a crossover, two individuals are selected at random from\n the parental population :math:`P_\\mathrm{p}`, those individuals are cloned\n using the :meth:`toolbox.clone` method and then mated using the\n :meth:`toolbox.mate` method. Only the first child is appended to the\n offspring population :math:`P_\\mathrm{o}`, the second child is discarded.\n In the case of a mutation, one individual is selected at random from\n :math:`P_\\mathrm{p}`, it is cloned and then mutated using using the\n :meth:`toolbox.mutate` method. The resulting mutant is appended to\n :math:`P_\\mathrm{o}`. In the case of a reproduction, one individual is\n selected at random from :math:`P_\\mathrm{p}`, cloned and appended to\n :math:`P_\\mathrm{o}`.\n This variation is named *Or* beceause an offspring will never result from\n both operations crossover and mutation. The sum of both probabilities\n shall be in :math:`[0, 1]`, the reproduction probability is\n 1 - *cxpb* - *mutpb*.\n ", "language": "en", "n_whitespaces": 361, "n_words": 245, "vocab_size": 131 }
95
Python
68
388616b6247ca4ea8de4e2f340d6206aee523541
gp_deap.py
181,912
19
142
varOr
https://github.com/EpistasisLab/tpot.git
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
301
0
43,664
15
2
9
def make_valid(self): if geos_version_tuple() < (3, 8): raise GEOSException("GEOSGeometry.make_valid() requires GEOS >= 3.8.0.") return GEOSGeometry(capi.geos_makevalid(self.ptr), srid=self.srid) # #### Unary predicates ####
django/contrib/gis/geos/geometry.py
68
django
{ "docstring": "\n Attempt to create a valid representation of a given invalid geometry\n without losing any of the input vertices.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 16 }
21
Python
20
9c19aff7c7561e3a82978a272ecdaad40dda5c00
geometry.py
204,039
4
40
make_valid
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
56
0
50,621
10
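A sketch of make_valid on an invalid geometry (it requires GeoDjango with GEOS >= 3.8 installed; the WKT below is a made-up self-intersecting polygon):

    from django.contrib.gis.geos import GEOSGeometry

    bowtie = GEOSGeometry("POLYGON((0 0, 2 2, 2 0, 0 2, 0 0))")  # self-intersecting, invalid
    print(bowtie.valid)              # False
    fixed = bowtie.make_valid()
    print(fixed.valid, fixed.geom_type)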
1
10
def test_image_inside_paragraph(self): # In Draftail's data model, images are block-level elements and therefore # split up preceding / following text into their own paragraphs converter = ContentstateConverter(features=["image"]) result = json.loads( converter.from_database_format( ) ) self.assertContentStateEqual( result, { "blocks": [ { "key": "00000", "inlineStyleRanges": [], "entityRanges": [], "depth": 0, "text": "before", "type": "unstyled", }, { "key": "00000", "inlineStyleRanges": [], "entityRanges": [{"key": 0, "offset": 0, "length": 1}], "depth": 0, "text": " ", "type": "atomic", }, { "key": "00000", "inlineStyleRanges": [], "entityRanges": [], "depth": 0, "text": "after", "type": "unstyled", }, ], "entityMap": { "0": { "data": { "format": "left", "alt": "an image", "id": "1", "src": "/media/not-found", }, "mutability": "IMMUTABLE", "type": "IMAGE", } }, }, )
wagtail/admin/tests/test_contentstate.py
347
wagtail
{ "docstring": "\n <p>before <embed embedtype=\"image\" alt=\"an image\" id=\"1\" format=\"left\" /> after</p>\n ", "language": "en", "n_whitespaces": 32, "n_words": 9, "vocab_size": 9 }
111
Python
72
d10f15e55806c6944827d801cd9c2d53f5da4186
test_contentstate.py
71,919
52
181
test_image_inside_paragraph
https://github.com/wagtail/wagtail.git
Reformat with black
1,056
0
15,780
16
8
12
def get_losses_for(self, inputs): if inputs is None: # Requesting unconditional losses. return [l for l in self.losses if l._unconditional_loss] # Requesting input-conditional losses. losses = [l for l in self.losses if not l._unconditional_loss] inputs = tf.nest.flatten(inputs) reachable = tf_utils.get_reachable_from_inputs(inputs, losses) return [l for l in losses if l in reachable]
keras/engine/base_layer_v1.py
117
keras
{ "docstring": "Retrieves losses relevant to a specific set of inputs.\n\n Args:\n inputs: Input tensor or list/tuple of input tensors.\n\n Returns:\n List of loss tensors of the layer that depend on `inputs`.\n ", "language": "en", "n_whitespaces": 69, "n_words": 30, "vocab_size": 27 }
50
Python
27
84afc5193d38057e2e2badf9c889ea87d80d8fbf
base_layer_v1.py
270,970
7
75
get_losses_for
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
121
0
80,627
10
1
12
def load_config(file_path): _, ext = os.path.splitext(file_path) assert ext in ['.yml', '.yaml'], "only support yaml files for now" config = yaml.load(open(file_path, 'rb'), Loader=yaml.Loader) return config
tools/program.py
84
PaddleOCR
{ "docstring": "\n Load config from yml/yaml file.\n Args:\n file_path (str): Path of the config file to be loaded.\n Returns: global config\n ", "language": "en", "n_whitespaces": 39, "n_words": 19, "vocab_size": 17 }
24
Python
21
a323fce66dd68a881cf599526185b52ab5df356b
program.py
22,954
5
49
load_config
https://github.com/PaddlePaddle/PaddleOCR.git
vqa code integrated into ppocr training system
39
0
4,491
11
1
47
def test_basic(request): processing_factory = MetricsConsumerStrategyFactory( max_msg_batch_size=1, max_msg_batch_time=1, max_parallel_batch_size=1, max_parallel_batch_time=1, max_batch_size=1, max_batch_time=1, processes=1, input_block_size=1024, output_block_size=1024, config=MetricsIngestConfiguration( db_backend=IndexerStorage.MOCK, db_backend_options={}, input_topic="ingest-metrics", output_topic="snuba-metrics", use_case_id=UseCaseKey.RELEASE_HEALTH, internal_metrics_tag="test", writes_limiter_cluster_options={}, writes_limiter_namespace="test", ), ) strategy = processing_factory.create_with_partitions( lambda _: None, {Partition(topic=Topic(name="ingest-bogus-metrics"), index=1): 1}, ) message = Message( Partition(Topic("topic"), 0), 0, KafkaPayload(None, json.dumps(counter_payload).encode("utf-8"), []), datetime.now(), ) # Just assert that the strategy does not crash. Further assertions, such as # on the produced messages, would slow down the test significantly. strategy.submit(message=message) strategy.close() strategy.join()
tests/sentry/sentry_metrics/test_parallel_indexer.py
284
sentry
{ "docstring": "\n Integration test to verify that the parallel indexer can spawn subprocesses\n properly. The main purpose is to verify that there are no\n pickling/unpickling errors when passing the strategy into the\n ParallelTransformStep, as that is easy to break.\n ", "language": "en", "n_whitespaces": 53, "n_words": 37, "vocab_size": 29 }
73
Python
65
d62c4935f02238a8f3991da5ef280a4bf249d771
test_parallel_indexer.py
85,561
35
184
test_basic
https://github.com/getsentry/sentry.git
fix(metrics): Fix startup crash in parallel indexer [sns-1490] (#38455) Since https://github.com/getsentry/sentry/pull/38225 the parallel indexer fails to serialize the processing function here: https://github.com/getsentry/sentry/blob/9bf499ad95030ed1112f117c5c1be59b2e036509/src/sentry/sentry_metrics/consumers/indexer/parallel.py#L115 We need to make sure the message processor is pickleable. So the config also needs to be pickleable. The old code worked because it imported the config and indexer from settings instead of attempting to pickle them.
316
0
18,005
17
5
17
def _get_coeff_exp(expr, x): from sympy.simplify import powsimp (c, m) = expand_power_base(powsimp(expr)).as_coeff_mul(x) if not m: return c, S.Zero [m] = m if m.is_Pow: if m.base != x: raise _CoeffExpValueError('expr not of form a*x**b') return c, m.exp elif m == x: return c, S.One else: raise _CoeffExpValueError('expr not of form a*x**b: %s' % expr)
sympy/integrals/meijerint.py
148
sympy
{ "docstring": "\n When expr is known to be of the form c*x**b, with c and/or b possibly 1,\n return c, b.\n\n Examples\n ========\n\n >>> from sympy.abc import x, a, b\n >>> from sympy.integrals.meijerint import _get_coeff_exp\n >>> _get_coeff_exp(a*x**b, x)\n (a, b)\n >>> _get_coeff_exp(x, x)\n (1, 1)\n >>> _get_coeff_exp(2*x, x)\n (2, 1)\n >>> _get_coeff_exp(x**3, x)\n (1, 3)\n ", "language": "en", "n_whitespaces": 99, "n_words": 53, "vocab_size": 40 }
52
Python
37
f757f3daae6e11ea0cfb7dadc133274d8d74315f
meijerint.py
196,786
14
90
_get_coeff_exp
https://github.com/sympy/sympy.git
Reordered imports 2
122
0
48,173
12
1
8
def activate(self) -> str: load_kube_config_from_dict( config_dict=self.config, context=self.context, ) return self.current_context()
src/prefect/blocks/kubernetes.py
48
prefect
{ "docstring": "\n Convenience method for activating the k8s config stored in an instance of this block\n\n Returns current_context for sanity check\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 18 }
10
Python
10
8f3ffd09dc47bfd2af6a635cc04c640febffd519
kubernetes.py
56,999
11
29
activate
https://github.com/PrefectHQ/prefect.git
add test coverage for get_api_client and activate
60
0
11,603
9
6
15
def parse_python_requires(value): # type: (t.Any) -> tuple[str, ...] if not isinstance(value, str): raise ValueError('python_requires must must be of type `str` not type `%s`' % type(value)) versions: tuple[str, ...] if value == 'default': versions = SUPPORTED_PYTHON_VERSIONS elif value == 'controller': versions = CONTROLLER_PYTHON_VERSIONS else: specifier_set = SpecifierSet(value) versions = tuple(version for version in SUPPORTED_PYTHON_VERSIONS if specifier_set.contains(Version(version))) return versions
test/lib/ansible_test/_internal/content_config.py
136
ansible
{ "docstring": "Parse the given 'python_requires' version specifier and return the matching Python versions.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
57
Python
41
f2abfc4b3d03a2baa078477d0ad2241263a00668
content_config.py
267,750
12
79
parse_python_requires
https://github.com/ansible/ansible.git
ansible-test - Parse content config only once. (#78418)
114
0
79,034
16
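The heart of parse_python_requires is the SpecifierSet filter; here is a small sketch using the public packaging library in place of ansible-test's internals (the supported-version tuple is illustrative only):

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    supported = ("3.8", "3.9", "3.10", "3.11")
    spec = SpecifierSet(">=3.9")
    print(tuple(v for v in supported if spec.contains(Version(v))))
    # ('3.9', '3.10', '3.11')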
3
12
def set_to_context(self, name): attribute = self.fattributes[name] if isinstance(attribute, NonInheritableFieldAttribute): # setting to sentinel will trigger 'default/default()' on getter setattr(self, name, Sentinel) else: try: setattr(self, name, self._get_parent_attribute(name, omit=True)) except AttributeError: # mostly playcontext as only tasks/handlers/blocks really resolve parent setattr(self, name, Sentinel)
lib/ansible/playbook/base.py
100
ansible
{ "docstring": " set to parent inherited value or Sentinel as appropriate", "language": "en", "n_whitespaces": 9, "n_words": 9, "vocab_size": 9 }
41
Python
35
ff6e4da36addccb06001f7b05b1a9c04ae1d7984
base.py
268,567
9
64
set_to_context
https://github.com/ansible/ansible.git
fixes to FA inheritance (#78990) finalized applies to all field attributes fix getting parent value also remove unused/needed extend/prepend signature moar testing
158
0
79,551
15
3
16
def is_url_equal(url, other_url): # type: (str, str) -> bool if not isinstance(url, str): raise TypeError(f"Expected string for url, received {url!r}") if not isinstance(other_url, str): raise TypeError(f"Expected string for url, received {other_url!r}") parsed_url = urllib3_util.parse_url(url) parsed_other_url = urllib3_util.parse_url(other_url) unparsed = parsed_url._replace(auth=None, query=None, fragment=None).url unparsed_other = parsed_other_url._replace(auth=None, query=None, fragment=None).url return unparsed == unparsed_other
pipenv/utils/internet.py
164
pipenv
{ "docstring": "\n Compare two urls by scheme, host, and path, ignoring auth\n\n :param str url: The initial URL to compare\n :param str url: Second url to compare to the first\n :return: Whether the URLs are equal without **auth**, **query**, and **fragment**\n :rtype: bool\n\n >>> is_url_equal(\"https://user:[email protected]/some/path?some_query\",\n \"https://user2:[email protected]/some/path\")\n True\n\n >>> is_url_equal(\"https://user:[email protected]/some/path?some_query\",\n \"https://mydomain.com/some?some_query\")\n False\n ", "language": "en", "n_whitespaces": 116, "n_words": 49, "vocab_size": 39 }
51
Python
35
3387881a6d4fc2d8bdc0f05c484cb2f7222acfb8
internet.py
19,514
10
98
is_url_equal
https://github.com/pypa/pipenv.git
Code reorg utils into utils module reduces complexity (#4990) * Split apart the massive utils.py into a utils module
92
0
3,008
11
1
3
def iloc(self) -> _iLocIndexer: return _iLocIndexer("iloc", self)
pandas/core/indexing.py
28
pandas
{ "docstring": "\n Purely integer-location based indexing for selection by position.\n\n ``.iloc[]`` is primarily integer position based (from ``0`` to\n ``length-1`` of the axis), but may also be used with a boolean\n array.\n\n Allowed inputs are:\n\n - An integer, e.g. ``5``.\n - A list or array of integers, e.g. ``[4, 3, 0]``.\n - A slice object with ints, e.g. ``1:7``.\n - A boolean array.\n - A ``callable`` function with one argument (the calling Series or\n DataFrame) and that returns valid output for indexing (one of the above).\n This is useful in method chains, when you don't have a reference to the\n calling object, but would like to base your selection on some value.\n - A tuple of row and column indexes. The tuple elements consist of one of the\n above inputs, e.g. ``(0, 1)``.\n\n ``.iloc`` will raise ``IndexError`` if a requested indexer is\n out-of-bounds, except *slice* indexers which allow out-of-bounds\n indexing (this conforms with python/numpy *slice* semantics).\n\n See more at :ref:`Selection by Position <indexing.integer>`.\n\n See Also\n --------\n DataFrame.iat : Fast integer location scalar accessor.\n DataFrame.loc : Purely label-location based indexer for selection by label.\n Series.iloc : Purely integer-location based indexing for\n selection by position.\n\n Examples\n --------\n >>> mydict = [{'a': 1, 'b': 2, 'c': 3, 'd': 4},\n ... {'a': 100, 'b': 200, 'c': 300, 'd': 400},\n ... {'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000 }]\n >>> df = pd.DataFrame(mydict)\n >>> df\n a b c d\n 0 1 2 3 4\n 1 100 200 300 400\n 2 1000 2000 3000 4000\n\n **Indexing just the rows**\n\n With a scalar integer.\n\n >>> type(df.iloc[0])\n <class 'pandas.core.series.Series'>\n >>> df.iloc[0]\n a 1\n b 2\n c 3\n d 4\n Name: 0, dtype: int64\n\n With a list of integers.\n\n >>> df.iloc[[0]]\n a b c d\n 0 1 2 3 4\n >>> type(df.iloc[[0]])\n <class 'pandas.core.frame.DataFrame'>\n\n >>> df.iloc[[0, 1]]\n a b c d\n 0 1 2 3 4\n 1 100 200 300 400\n\n With a `slice` object.\n\n >>> df.iloc[:3]\n a b c d\n 0 1 2 3 4\n 1 100 200 300 400\n 2 1000 2000 3000 4000\n\n With a boolean mask the same length as the index.\n\n >>> df.iloc[[True, False, True]]\n a b c d\n 0 1 2 3 4\n 2 1000 2000 3000 4000\n\n With a callable, useful in method chains. The `x` passed\n to the ``lambda`` is the DataFrame being sliced. This selects\n the rows whose index label even.\n\n >>> df.iloc[lambda x: x.index % 2 == 0]\n a b c d\n 0 1 2 3 4\n 2 1000 2000 3000 4000\n\n **Indexing both axes**\n\n You can mix the indexer types for the index and columns. Use ``:`` to\n select the entire axis.\n\n With scalar integers.\n\n >>> df.iloc[0, 1]\n 2\n\n With lists of integers.\n\n >>> df.iloc[[0, 2], [1, 3]]\n b d\n 0 2 4\n 2 2000 4000\n\n With `slice` objects.\n\n >>> df.iloc[1:3, 0:3]\n a b c\n 1 100 200 300\n 2 1000 2000 3000\n\n With a boolean array whose length matches the columns.\n\n >>> df.iloc[:, [True, False, True, False]]\n a c\n 0 1 3\n 1 100 300\n 2 1000 3000\n\n With a callable function that expects the Series or DataFrame.\n\n >>> df.iloc[:, lambda df: [0, 2]]\n a c\n 0 1 3\n 1 100 300\n 2 1000 3000\n ", "language": "en", "n_whitespaces": 1603, "n_words": 527, "vocab_size": 251 }
7
Python
7
e7afa4b641b146874d17c36caa8a050bfde31283
indexing.py
168,154
137
15
iloc
https://github.com/pandas-dev/pandas.git
DOC: Add tuple description to allowed inputs for iloc #47799 (#47989) DOC: Add tuple description to allowed inputs for iloc
21
0
40,218
8
3
6
def set_task_factory(self, factory): if factory is not None and not callable(factory): raise TypeError('task factory must be a callable or None') self._task_factory = factory
python3.10.4/Lib/asyncio/base_events.py
52
XX-Net
{ "docstring": "Set a task factory that will be used by loop.create_task().\n\n If factory is None the default task factory will be set.\n\n If factory is a callable, it should have a signature matching\n '(loop, coro)', where 'loop' will be a reference to the active\n event loop, 'coro' will be a coroutine object. The callable\n must return a Future.\n ", "language": "en", "n_whitespaces": 100, "n_words": 57, "vocab_size": 39 }
23
Python
20
8198943edd73a363c266633e1aa5b2a9e9c9f526
base_events.py
220,322
4
30
set_task_factory
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
55
0
55,965
10
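A minimal sketch of a custom task factory as described in the docstring above (the logging behaviour is only an illustration of the '(loop, coro)' signature):

    import asyncio

    def logging_factory(loop, coro):
        # Must return a Future; here we wrap the coroutine in a Task and log its name.
        task = asyncio.Task(coro, loop=loop)
        print("created", task.get_name())
        return task

    async def main():
        asyncio.get_running_loop().set_task_factory(logging_factory)
        await asyncio.create_task(asyncio.sleep(0))

    asyncio.run(main())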
1
4
def ports(self): return self.runtime_args.port
jina/serve/gateway.py
22
jina
{ "docstring": "Gets all the list of ports from the runtime_args as a list.\n :return: The lists of ports to be exposed\n ", "language": "en", "n_whitespaces": 34, "n_words": 20, "vocab_size": 17 }
4
Python
4
e4b930e6369f1ec69b07af6190d61aa3cb3d9cec
gateway.py
13,573
2
12
ports
https://github.com/jina-ai/jina.git
refactor: add properties to gateway (#5417)
18
0
2,687
7
3
9
def _set_level(self, value=None): if value is None and hasattr(self, "_level"): del self._level else: self._level = int(value) level = property(_get_level, _set_level, _set_level)
django/contrib/messages/storage/base.py
75
django
{ "docstring": "\n Set a custom minimum recorded level.\n\n If set to ``None``, the default level will be used (see the\n ``_get_level`` method).\n ", "language": "en", "n_whitespaces": 49, "n_words": 20, "vocab_size": 19 }
21
Python
19
9c19aff7c7561e3a82978a272ecdaad40dda5c00
base.py
204,172
5
36
_set_level
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
67
0
50,669
11
3
5
def _as_graph_element(obj): conv_fn = getattr(obj, "_as_graph_element", None) if conv_fn and callable(conv_fn): return conv_fn() return None
keras/backend.py
52
keras
{ "docstring": "Convert `obj` to a graph element if possible, otherwise return `None`.\n\n Args:\n obj: Object to convert.\n\n Returns:\n The result of `obj._as_graph_element()` if that method is available;\n otherwise `None`.\n ", "language": "en", "n_whitespaces": 56, "n_words": 28, "vocab_size": 24 }
15
Python
13
84afc5193d38057e2e2badf9c889ea87d80d8fbf
backend.py
269,626
5
30
_as_graph_element
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
34
0
80,244
9
3
8
def _find_gcs_address_or_die(): gcs_addresses = _find_address_from_flag("--gcs-address") if len(gcs_addresses) > 1: raise ConnectionError( f"Found multiple active Ray instances: {gcs_addresses}. " "Please specify the one to connect to by setting `--address` flag " "or `RAY_ADDRESS` environment variable.") sys.exit(1) elif not gcs_addresses: raise ConnectionError( "Could not find any running Ray instance. " "Please specify the one to connect to by setting `--address` flag " "or `RAY_ADDRESS` environment variable.") return gcs_addresses.pop()
python/ray/_private/services.py
102
ray
{ "docstring": "Find one GCS address unambiguously, or raise an error.\n\n Callers outside of this module should use get_ray_address_to_use_or_die()\n ", "language": "en", "n_whitespaces": 23, "n_words": 17, "vocab_size": 17 }
66
Python
43
70db5c5592d94b611fee0a334414f1f4f5cc151a
services.py
128,999
14
50
_find_gcs_address_or_die
https://github.com/ray-project/ray.git
[GCS][Bootstrap n/n] Do not start Redis in GCS bootstrapping mode (#21232) After this change in GCS bootstrapping mode, Redis no longer starts and `address` is treated as the GCS address of the Ray cluster. Co-authored-by: Yi Cheng <[email protected]> Co-authored-by: Yi Cheng <[email protected]>
168
0
28,869
12
15
39
def get_tax_template(posting_date, args): args = frappe._dict(args) conditions = [] if posting_date: conditions.append( f ) else: conditions.append("(from_date is null) and (to_date is null)") conditions.append( "ifnull(tax_category, '') = {0}".format(frappe.db.escape(cstr(args.get("tax_category")))) ) if "tax_category" in args.keys(): del args["tax_category"] for key, value in args.items(): if key == "use_for_shopping_cart": conditions.append("use_for_shopping_cart = {0}".format(1 if value else 0)) elif key == "customer_group": if not value: value = get_root_of("Customer Group") customer_group_condition = get_customer_group_condition(value) conditions.append("ifnull({0}, '') in ('', {1})".format(key, customer_group_condition)) else: conditions.append("ifnull({0}, '') in ('', {1})".format(key, frappe.db.escape(cstr(value)))) tax_rule = frappe.db.sql( .format( " and ".join(conditions) ), as_dict=True, ) if not tax_rule: return None for rule in tax_rule: rule.no_of_keys_matched = 0 for key in args: if rule.get(key): rule.no_of_keys_matched += 1 def cmp(a, b): # refernce: https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons return int(a > b) - int(a < b) rule = sorted( tax_rule, key=functools.cmp_to_key( lambda b, a: cmp(a.no_of_keys_matched, b.no_of_keys_matched) or cmp(a.priority, b.priority) ), )[0] tax_template = rule.sales_tax_template or rule.purchase_tax_template doctype = "{0} Taxes and Charges Template".format(rule.tax_type) if frappe.db.get_value(doctype, tax_template, "disabled") == 1: return None return tax_template
erpnext/accounts/doctype/tax_rule/tax_rule.py
559
erpnext
{ "docstring": "Get matching tax rule(from_date is null or from_date <= '{posting_date}')\n\t\t\tand (to_date is null or to_date >= '{posting_date}')select * from `tabTax Rule`\n\t\twhere {0}", "language": "en", "n_whitespaces": 21, "n_words": 24, "vocab_size": 21 }
159
Python
103
05dd1d6d15c6c8c66165e9f267078c3cf9aec10e
tax_rule.py
68,528
51
312
get_tax_template
https://github.com/frappe/erpnext.git
refactor: tax rule validity query (#30934)
108
0
14,812
18
1
7
def apothem(self): return self.radius * cos(S.Pi/self._n)
sympy/geometry/polygon.py
36
sympy
{ "docstring": "The inradius of the RegularPolygon.\n\n The apothem/inradius is the radius of the inscribed circle.\n\n Returns\n =======\n\n apothem : number or instance of Basic\n\n See Also\n ========\n\n sympy.geometry.line.Segment.length, sympy.geometry.ellipse.Circle.radius\n\n Examples\n ========\n\n >>> from sympy import Symbol\n >>> from sympy import RegularPolygon, Point\n >>> radius = Symbol('r')\n >>> rp = RegularPolygon(Point(0, 0), radius, 4)\n >>> rp.apothem\n sqrt(2)*r/2\n\n ", "language": "en", "n_whitespaces": 167, "n_words": 55, "vocab_size": 40 }
6
Python
6
498015021131af4dbb07eb110e5badaba8250c7b
polygon.py
196,303
2
21
apothem
https://github.com/sympy/sympy.git
Updated import locations
20
0
47,803
10
1
5
def seek(self, offset, whence=0): # type: (int, int) -> int return self._file.seek(offset, whence)
.venv/lib/python3.8/site-packages/pip/_internal/network/lazy_wheel.py
37
transferlearning
{ "docstring": "Change stream position and return the new absolute position.\n\n Seek to offset relative position indicated by whence:\n * 0: Start of stream (the default). pos should be >= 0;\n * 1: Current position - pos may be negative;\n * 2: End of stream - pos usually negative.\n ", "language": "en", "n_whitespaces": 83, "n_words": 47, "vocab_size": 36 }
13
Python
13
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
lazy_wheel.py
60,894
2
23
seek
https://github.com/jindongwang/transferlearning.git
upd; format
34
0
12,326
8
1
19
def test_python_render_indent_guides(): syntax = Panel.fit( Syntax( CODE, lexer="python", line_numbers=True, line_range=(2, 10), theme="default", code_width=60, word_wrap=True, indent_guides=True, ), padding=0, ) rendered_syntax = render(syntax) print(repr(rendered_syntax)) expected = '╭────────────────────────────────────────────────────────────────╮\n│\x1b[1;38;2;24;24;24;48;2;248;248;248m \x1b[0m\x1b[38;2;173;173;173;48;2;248;248;248m 2 \x1b[0m\x1b[2;3;38;2;64;128;128;48;2;248;248;248m│ \x1b[0m\x1b[3;38;2;186;33;33;48;2;248;248;248m\x1b[0m\x1b[48;2;248;248;248m \x1b[0m│\n│\x1b[1;38;2;24;24;24;48;2;248;248;248m \x1b[0m\x1b[38;2;173;173;173;48;2;248;248;248m 3 \x1b[0m\x1b[2;3;38;2;64;128;128;48;2;248;248;248m│ \x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248miter_values\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m \x1b[0m\x1b[38;2;102;102;102;48;2;248;248;248m=\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m \x1b[0m\x1b[38;2;0;128;0;48;2;248;248;248miter\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m(\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248mvalues\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m)\x1b[0m\x1b[48;2;248;248;248m \x1b[0m│\n│\x1b[1;38;2;24;24;24;48;2;248;248;248m \x1b[0m\x1b[38;2;173;173;173;48;2;248;248;248m 4 \x1b[0m\x1b[2;3;38;2;64;128;128;48;2;248;248;248m│ \x1b[0m\x1b[1;38;2;0;128;0;48;2;248;248;248mtry\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m:\x1b[0m\x1b[48;2;248;248;248m \x1b[0m│\n│\x1b[1;38;2;24;24;24;48;2;248;248;248m \x1b[0m\x1b[38;2;173;173;173;48;2;248;248;248m 5 \x1b[0m\x1b[2;3;38;2;64;128;128;48;2;248;248;248m│ │ \x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248mprevious_value\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m \x1b[0m\x1b[38;2;102;102;102;48;2;248;248;248m=\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m \x1b[0m\x1b[38;2;0;128;0;48;2;248;248;248mnext\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m(\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248miter_values\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m)\x1b[0m\x1b[48;2;248;248;248m \x1b[0m│\n│\x1b[1;38;2;24;24;24;48;2;248;248;248m \x1b[0m\x1b[38;2;173;173;173;48;2;248;248;248m 6 \x1b[0m\x1b[2;3;38;2;64;128;128;48;2;248;248;248m│ \x1b[0m\x1b[1;38;2;0;128;0;48;2;248;248;248mexcept\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m \x1b[0m\x1b[1;38;2;210;65;58;48;2;248;248;248mStopIteration\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m:\x1b[0m\x1b[48;2;248;248;248m \x1b[0m│\n│\x1b[1;38;2;24;24;24;48;2;248;248;248m \x1b[0m\x1b[38;2;173;173;173;48;2;248;248;248m 7 \x1b[0m\x1b[2;3;38;2;64;128;128;48;2;248;248;248m│ │ \x1b[0m\x1b[1;38;2;0;128;0;48;2;248;248;248mreturn\x1b[0m\x1b[48;2;248;248;248m \x1b[0m│\n│\x1b[1;38;2;24;24;24;48;2;248;248;248m \x1b[0m\x1b[38;2;173;173;173;48;2;248;248;248m 8 \x1b[0m\x1b[2;3;38;2;64;128;128;48;2;248;248;248m│ \x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248mfirst\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m \x1b[0m\x1b[38;2;102;102;102;48;2;248;248;248m=\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m \x1b[0m\x1b[1;38;2;0;128;0;48;2;248;248;248mTrue\x1b[0m\x1b[48;2;248;248;248m \x1b[0m│\n│\x1b[1;38;2;24;24;24;48;2;248;248;248m \x1b[0m\x1b[38;2;173;173;173;48;2;248;248;248m 9 \x1b[0m\x1b[2;3;38;2;64;128;128;48;2;248;248;248m│ \x1b[0m\x1b[1;38;2;0;128;0;48;2;248;248;248mfor\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m \x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248mvalue\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m \x1b[0m\x1b[1;38;2;170;34;255;48;2;248;248;248min\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m \x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248miter_values\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m:\x1b[0m\x1b[48;2;248;248;248m \x1b[0m│\n│\x1b[1;38;2;24;24;24;48;2;248;248;248m 
\x1b[0m\x1b[38;2;173;173;173;48;2;248;248;248m10 \x1b[0m\x1b[2;3;38;2;64;128;128;48;2;248;248;248m│ │ \x1b[0m\x1b[1;38;2;0;128;0;48;2;248;248;248myield\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m \x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248mfirst\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m,\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m \x1b[0m\x1b[1;38;2;0;128;0;48;2;248;248;248mFalse\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m,\x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248m \x1b[0m\x1b[38;2;0;0;0;48;2;248;248;248mprevious_value\x1b[0m\x1b[48;2;248;248;248m \x1b[0m│\n╰────────────────────────────────────────────────────────────────╯\n' assert rendered_syntax == expected
tests/test_syntax.py
294
rich
{ "docstring": "Iterate and generate a tuple with a flag for first \\x1b[0m\\x1b[48;2;248;248;248m \\x1b[0m│\\n│\\x1b[48;2;248;248;248m \\x1b[0m\\x1b[3;38;2;186;33;33;48;2;248;248;248mand last value.", "language": "en", "n_whitespaces": 18, "n_words": 15, "vocab_size": 14 }
89
Python
59
2cf35b18a8716c963c0f9252544a3a8b9881cd6c
test_syntax.py
161,336
18
73
test_python_render_indent_guides
https://github.com/Textualize/rich.git
Try using default theme in test to avoid ubuntu/macos variance
546
0
38,965
12
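A minimal, hedged sketch of the rich API exercised by the test above: rendering a Python snippet inside a fitted panel with line numbers and indent guides. The `CODE` string here is a stand-in for the fixture used in the real test suite, not the exact fixture.

# Hedged sketch; CODE is a placeholder snippet, not the rich test fixture.
from rich.console import Console
from rich.panel import Panel
from rich.syntax import Syntax

CODE = '''def loop_first_last(values):
    """Iterate and generate a tuple with a flag for first and last value."""
    iter_values = iter(values)
    try:
        previous_value = next(iter_values)
    except StopIteration:
        return
'''

syntax = Panel.fit(
    Syntax(
        CODE,
        lexer="python",
        line_numbers=True,
        theme="default",
        code_width=60,
        word_wrap=True,
        indent_guides=True,
    ),
    padding=0,
)
Console().print(syntax)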
8
12
async def install(cls):
    for field in cls.__fields__.values():
        if Block.is_block_class(field.type_):
            await field.type_.install()
        if get_origin(field.type_) is Union:
            for type in get_args(field.type_):
                if Block.is_block_class(type):
                    await type.install()
src/prefect/blocks/core.py
116
prefect
{ "docstring": "\n Makes block available for configuration with current Orion server.\n Recursively installs all nested blocks. Installation is idempotent.\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 17 }
23
Python
18
6a1cec80715c2b633362403a6be9470fc70c31e8
core.py
56,175
25
160
install
https://github.com/PrefectHQ/prefect.git
Makes block installation recursive
131
0
11,450
16
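The recursion above relies on `typing.get_origin`/`get_args` to reach block classes nested inside `Union` annotations. A standalone sketch of that introspection mechanism is shown below; the `Config` class and field names are hypothetical and not part of Prefect's API.

# Hedged sketch: how get_origin/get_args expose Union members, mirroring the
# loop over get_args(field.type_) in install() above.
from typing import Union, get_args, get_origin, get_type_hints


class Config:
    storage: Union[str, int]
    name: str


for name, annotation in get_type_hints(Config).items():
    if get_origin(annotation) is Union:
        print(name, "->", get_args(annotation))  # storage -> (str, int)
    else:
        print(name, "->", annotation)            # name -> str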
1
12
def test_json_to_doc_validation_error(doc):
    doc_json = doc.to_json()
    doc_json.pop("tokens")
    with pytest.raises(ValueError):
        Doc(doc.vocab).from_json(doc_json, validate=True)
spacy/tests/doc/test_json_doc_conversion.py
75
spaCy
{ "docstring": "Test that Doc.from_json() raises an exception when validating invalid input.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
10
Python
10
8387ce4c01db48d92ac5638e18316c0f1fc8861e
test_json_doc_conversion.py
111,418
5
42
test_json_to_doc_validation_error
https://github.com/explosion/spaCy.git
Add Doc.from_json() (#10688) * Implement Doc.from_json: rough draft. * Implement Doc.from_json: first draft with tests. * Implement Doc.from_json: added documentation on website for Doc.to_json(), Doc.from_json(). * Implement Doc.from_json: formatting changes. * Implement Doc.to_json(): reverting unrelated formatting changes. * Implement Doc.to_json(): fixing entity and span conversion. Moving fixture and doc <-> json conversion tests into single file. * Implement Doc.from_json(): replaced entity/span converters with doc.char_span() calls. * Implement Doc.from_json(): handling sentence boundaries in spans. * Implementing Doc.from_json(): added parser-free sentence boundaries transfer. * Implementing Doc.from_json(): added parser-free sentence boundaries transfer. * Implementing Doc.from_json(): incorporated various PR feedback. * Renaming fixture for document without dependencies. Co-authored-by: Adriane Boyd <[email protected]> * Implementing Doc.from_json(): using two sent_starts instead of one. Co-authored-by: Adriane Boyd <[email protected]> * Implementing Doc.from_json(): doc_without_dependency_parser() -> doc_without_deps. Co-authored-by: Adriane Boyd <[email protected]> * Implementing Doc.from_json(): incorporating various PR feedback. Rebased on latest master. * Implementing Doc.from_json(): refactored Doc.from_json() to work with annotation IDs instead of their string representations. * Implement Doc.from_json(): reverting unwanted formatting/rebasing changes. * Implement Doc.from_json(): added check for char_span() calculation for entities. * Update spacy/tokens/doc.pyx Co-authored-by: Adriane Boyd <[email protected]> * Implement Doc.from_json(): minor refactoring, additional check for token attribute consistency with corresponding test. * Implement Doc.from_json(): removed redundancy in annotation type key naming. Co-authored-by: Adriane Boyd <[email protected]> * Implement Doc.from_json(): Simplifying setting annotation values. Co-authored-by: Adriane Boyd <[email protected]> * Implement doc.from_json(): renaming annot_types to token_attrs. Co-authored-by: Adriane Boyd <[email protected]> * Implement Doc.from_json(): adjustments for renaming of annot_types to token_attrs. * Implement Doc.from_json(): removing default categories. Co-authored-by: Adriane Boyd <[email protected]> * Implement Doc.from_json(): simplifying lexeme initialization. Co-authored-by: Adriane Boyd <[email protected]> * Implement Doc.from_json(): simplifying lexeme initialization. Co-authored-by: Adriane Boyd <[email protected]> * Implement Doc.from_json(): refactoring to only have keys for present annotations. * Implement Doc.from_json(): fix check for tokens' HEAD attributes. Co-authored-by: Adriane Boyd <[email protected]> * Implement Doc.from_json(): refactoring Doc.from_json(). * Implement Doc.from_json(): fixing span_group retrieval. Co-authored-by: Adriane Boyd <[email protected]> * Implement Doc.from_json(): fixing span retrieval. * Implement Doc.from_json(): added schema for Doc JSON format. Minor refactoring in Doc.from_json(). * Implement Doc.from_json(): added comment regarding Token and Span extension support. * Implement Doc.from_json(): renaming inconsistent_props to partial_attrs.. Co-authored-by: Adriane Boyd <[email protected]> * Implement Doc.from_json(): adjusting error message. Co-authored-by: Adriane Boyd <[email protected]> * Implement Doc.from_json(): extending E1038 message. Co-authored-by: Adriane Boyd <[email protected]> * Implement Doc.from_json(): added params to E1038 raises. 
* Implement Doc.from_json(): combined attribute collection with partial attributes check. * Implement Doc.from_json(): added optional schema validation. * Implement Doc.from_json(): fixed optional fields in schema, tests. * Implement Doc.from_json(): removed redundant None check for DEP. * Implement Doc.from_json(): added passing of schema validatoin message to E1037.. * Implement Doc.from_json(): removing redundant error E1040. Co-authored-by: Adriane Boyd <[email protected]> * Implement Doc.from_json(): changing message for E1037. Co-authored-by: Adriane Boyd <[email protected]> * Implement Doc.from_json(): adjusted website docs and docstring of Doc.from_json(). * Update spacy/tests/doc/test_json_doc_conversion.py * Implement Doc.from_json(): docstring update. Co-authored-by: Adriane Boyd <[email protected]> * Implement Doc.from_json(): docstring update. Co-authored-by: Adriane Boyd <[email protected]> * Implement Doc.from_json(): website docs update. Co-authored-by: Adriane Boyd <[email protected]> * Implement Doc.from_json(): docstring formatting. Co-authored-by: Adriane Boyd <[email protected]> * Implement Doc.from_json(): docstring formatting. Co-authored-by: Adriane Boyd <[email protected]> * Implement Doc.from_json(): fixing Doc reference in website docs. Co-authored-by: Adriane Boyd <[email protected]> * Implement Doc.from_json(): reformatted website/docs/api/doc.md. * Implement Doc.from_json(): bumped IDs of new errors to avoid merge conflicts. * Implement Doc.from_json(): fixing bug in tests. Co-authored-by: Adriane Boyd <[email protected]> * Implement Doc.from_json(): fix setting of sentence starts for docs without DEP. * Implement Doc.from_json(): add check for valid char spans when manually setting sentence boundaries. Refactor sentence boundary setting slightly. Move error message for lack of support for partial token annotations to errors.py. * Implement Doc.from_json(): simplify token sentence start manipulation. Co-authored-by: Adriane Boyd <[email protected]> * Combine related error messages * Update spacy/tests/doc/test_json_doc_conversion.py Co-authored-by: Adriane Boyd <[email protected]>
29
0
24,401
12
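A hedged sketch of the round trip the test above guards: `Doc.to_json()` produces a dict, and `Doc(vocab).from_json(..., validate=True)` rejects it once a required key is removed. It assumes a spaCy version that ships `Doc.from_json` (the feature added by this commit) and uses a blank English pipeline.

# Hedged sketch, assuming a spaCy release with Doc.from_json support.
import spacy
from spacy.tokens import Doc

nlp = spacy.blank("en")
doc = nlp("This is a sentence.")

doc_json = doc.to_json()
restored = Doc(nlp.vocab).from_json(doc_json, validate=True)
assert restored.text == doc.text

doc_json.pop("tokens")  # drop a required key
try:
    Doc(nlp.vocab).from_json(doc_json, validate=True)
except ValueError as err:
    print("rejected as expected:", err)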
1
15
def is_sequence_right_padded(mask):
    max_seq_length = tf.shape(mask)[1]
    count_of_true = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1)
    right_padded_mask = tf.sequence_mask(
        count_of_true, maxlen=max_seq_length)
    return tf.reduce_all(tf.equal(mask, right_padded_mask))
keras/layers/rnn/gru_lstm_utils.py
100
keras
{ "docstring": "Check the mask tensor and see if it right padded.\n\n For cuDNN kernel, it uses the sequence length param to skip the tailing\n timestep. If the data is left padded, or not a strict right padding (has\n masked value in the middle of the sequence), then cuDNN kernel won't be work\n properly in those cases.\n\n Left padded data: [[False, False, True, True, True]].\n Right padded data: [[True, True, True, False, False]].\n Mixture of mask/unmasked data: [[True, False, True, False, False]].\n\n Note that for the mixed data example above, the actually data RNN should see\n are those 2 Trues (index 0 and 2), the index 1 False should be ignored and not\n pollute the internal states.\n\n Args:\n mask: the Boolean tensor with shape [batch, timestep]\n\n Returns:\n boolean scalar tensor, whether the mask is strictly right padded.\n ", "language": "en", "n_whitespaces": 154, "n_words": 135, "vocab_size": 93 }
18
Python
16
01c906c4178db5ae03b7eb2d298a052c952a0667
gru_lstm_utils.py
268,968
6
64
is_sequence_right_padded
https://github.com/keras-team/keras.git
Reorganize RNN layers, cells and wrappers into smaller logically organized files hosted under an `rnn` directory. PiperOrigin-RevId: 428841673
28
0
79,793
11
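A standalone sketch of the same check using only public TensorFlow ops, evaluated on the mask examples from the docstring (right-padded, left-padded, and mixed). This re-implements the logic rather than importing the private Keras utility.

# Hedged sketch re-implementing the check with public tf ops.
import tensorflow as tf


def is_right_padded(mask):
    max_seq_length = tf.shape(mask)[1]
    count_of_true = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1)
    right_padded_mask = tf.sequence_mask(count_of_true, maxlen=max_seq_length)
    return tf.reduce_all(tf.equal(mask, right_padded_mask))


print(is_right_padded(tf.constant([[True, True, True, False, False]])))   # True
print(is_right_padded(tf.constant([[False, False, True, True, True]])))   # False
print(is_right_padded(tf.constant([[True, False, True, False, False]])))  # False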
1
7
def query_cursor_left(self) -> bool:
    previous_index = self.cursor_index
    new_index = max(0, previous_index - 1)
    return previous_index != new_index
src/textual/widgets/text_input.py
45
textual
{ "docstring": "Check if the cursor can move 1 character left in the text", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
17
Python
13
9e25752c859d25c172697236b94997a38c0799bf
text_input.py
183,346
5
27
query_cursor_left
https://github.com/Textualize/textual.git
Scrolling within text input
45
0
44,157
9
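The method is a pure boundary check: moving left only "succeeds" when the clamped index actually changes. The same logic in isolation, as a hypothetical standalone function rather than Textual's API:

# Hedged sketch of the boundary check performed by query_cursor_left().
def can_move_left(cursor_index: int) -> bool:
    new_index = max(0, cursor_index - 1)
    return cursor_index != new_index


print(can_move_left(0))  # False - already at the left edge
print(can_move_left(3))  # True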
15
54
def resample(*arrays, replace=True, n_samples=None, random_state=None, stratify=None):
    max_n_samples = n_samples
    random_state = check_random_state(random_state)

    if len(arrays) == 0:
        return None

    first = arrays[0]
    n_samples = first.shape[0] if hasattr(first, "shape") else len(first)

    if max_n_samples is None:
        max_n_samples = n_samples
    elif (max_n_samples > n_samples) and (not replace):
        raise ValueError(
            "Cannot sample %d out of arrays with dim %d when replace is False"
            % (max_n_samples, n_samples)
        )

    check_consistent_length(*arrays)

    if stratify is None:
        if replace:
            indices = random_state.randint(0, n_samples, size=(max_n_samples,))
        else:
            indices = np.arange(n_samples)
            random_state.shuffle(indices)
            indices = indices[:max_n_samples]
    else:
        # Code adapted from StratifiedShuffleSplit()
        y = check_array(stratify, ensure_2d=False, dtype=None)
        if y.ndim == 2:
            # for multi-label y, map each distinct row to a string repr
            # using join because str(row) uses an ellipsis if len(row) > 1000
            y = np.array([" ".join(row.astype("str")) for row in y])

        classes, y_indices = np.unique(y, return_inverse=True)
        n_classes = classes.shape[0]

        class_counts = np.bincount(y_indices)

        # Find the sorted list of instances for each class:
        # (np.unique above performs a sort, so code is O(n logn) already)
        class_indices = np.split(
            np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1]
        )

        n_i = _approximate_mode(class_counts, max_n_samples, random_state)

        indices = []
        for i in range(n_classes):
            indices_i = random_state.choice(class_indices[i], n_i[i], replace=replace)
            indices.extend(indices_i)

        indices = random_state.permutation(indices)

    # convert sparse matrices to CSR for row-based indexing
    arrays = [a.tocsr() if issparse(a) else a for a in arrays]
    resampled_arrays = [_safe_indexing(a, indices) for a in arrays]
    if len(resampled_arrays) == 1:
        # syntactic sugar for the unit argument case
        return resampled_arrays[0]
    else:
        return resampled_arrays
sklearn/utils/__init__.py
579
scikit-learn
{ "docstring": "Resample arrays or sparse matrices in a consistent way.\n\n The default strategy implements one step of the bootstrapping\n procedure.\n\n Parameters\n ----------\n *arrays : sequence of array-like of shape (n_samples,) or \\\n (n_samples, n_outputs)\n Indexable data-structures can be arrays, lists, dataframes or scipy\n sparse matrices with consistent first dimension.\n\n replace : bool, default=True\n Implements resampling with replacement. If False, this will implement\n (sliced) random permutations.\n\n n_samples : int, default=None\n Number of samples to generate. If left to None this is\n automatically set to the first dimension of the arrays.\n If replace is False it should not be larger than the length of\n arrays.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for shuffling\n the data.\n Pass an int for reproducible results across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n stratify : array-like of shape (n_samples,) or (n_samples, n_outputs), \\\n default=None\n If not None, data is split in a stratified fashion, using this as\n the class labels.\n\n Returns\n -------\n resampled_arrays : sequence of array-like of shape (n_samples,) or \\\n (n_samples, n_outputs)\n Sequence of resampled copies of the collections. The original arrays\n are not impacted.\n\n See Also\n --------\n shuffle : Shuffle arrays or sparse matrices in a consistent way.\n\n Examples\n --------\n It is possible to mix sparse and dense arrays in the same run::\n\n >>> import numpy as np\n >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])\n >>> y = np.array([0, 1, 2])\n\n >>> from scipy.sparse import coo_matrix\n >>> X_sparse = coo_matrix(X)\n\n >>> from sklearn.utils import resample\n >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)\n >>> X\n array([[1., 0.],\n [2., 1.],\n [1., 0.]])\n\n >>> X_sparse\n <3x2 sparse matrix of type '<... 'numpy.float64'>'\n with 4 stored elements in Compressed Sparse Row format>\n\n >>> X_sparse.toarray()\n array([[1., 0.],\n [2., 1.],\n [1., 0.]])\n\n >>> y\n array([0, 1, 0])\n\n >>> resample(y, n_samples=2, random_state=0)\n array([0, 1])\n\n Example using stratification::\n\n >>> y = [0, 0, 1, 1, 1, 1, 1, 1, 1]\n >>> resample(y, n_samples=5, replace=False, stratify=y,\n ... random_state=0)\n [1, 1, 1, 0, 1]\n ", "language": "en", "n_whitespaces": 705, "n_words": 329, "vocab_size": 188 }
235
Python
156
82cd3d74f252e7d4c5e733b530897d499d5d640b
__init__.py
260,482
44
365
resample
https://github.com/scikit-learn/scikit-learn.git
DOC numpydoc validation for `resample` function (#23916)
576
0
76,278
19
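A short usage sketch for `resample`, following the examples already given in its docstring (scikit-learn and NumPy assumed installed):

import numpy as np
from sklearn.utils import resample

X = np.array([[1.0, 0.0], [2.0, 1.0], [0.0, 0.0]])
y = np.array([0, 1, 2])

# Bootstrap (sampling with replacement) keeps rows of X and y aligned.
X_boot, y_boot = resample(X, y, random_state=0)

# Stratified subsample without replacement.
labels = [0, 0, 1, 1, 1, 1, 1, 1, 1]
subset = resample(labels, n_samples=5, replace=False, stratify=labels, random_state=0)
print(subset)  # [1, 1, 1, 0, 1] as in the docstring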
6
18
def resolve_model_string(model_string, default_app=None):
    if isinstance(model_string, str):
        try:
            app_label, model_name = model_string.split(".")
        except ValueError:
            if default_app is not None:
                # If we can't split, assume a model in current app
                app_label = default_app
                model_name = model_string
            else:
                raise ValueError(
                    "Can not resolve {0!r} into a model. Model names "
                    "should be in the form app_label.model_name".format(model_string),
                    model_string,
                )
        return apps.get_model(app_label, model_name)

    elif isinstance(model_string, type) and issubclass(model_string, Model):
        return model_string

    else:
        raise ValueError(
            "Can not resolve {0!r} into a model".format(model_string), model_string
        )


SCRIPT_RE = re.compile(r"<(-*)/script>")
wagtail/core/utils.py
183
wagtail
{ "docstring": "\n Resolve an 'app_label.model_name' string into an actual model class.\n If a model class is passed in, just return that.\n\n Raises a LookupError if a model can not be found, or ValueError if passed\n something that is neither a model or a string.\n ", "language": "en", "n_whitespaces": 58, "n_words": 42, "vocab_size": 30 }
82
Python
59
d10f15e55806c6944827d801cd9c2d53f5da4186
utils.py
74,693
21
101
resolve_model_string
https://github.com/wagtail/wagtail.git
Reformat with black
311
0
16,297
19
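A hedged usage sketch, assuming a Django project with Wagtail installed; the `blog` app and `BlogPage` model are placeholder names, not part of Wagtail itself.

# Hedged sketch; "blog"/"BlogPage" are hypothetical app and model names.
from wagtail.core.utils import resolve_model_string

# "app_label.model_name" form
BlogPage = resolve_model_string("blog.BlogPage")

# Bare model name falls back to default_app
BlogPage = resolve_model_string("BlogPage", default_app="blog")

# Passing a model class returns it unchanged
assert resolve_model_string(BlogPage) is BlogPage

# Anything that is neither a model nor a string raises ValueError
try:
    resolve_model_string(42)
except ValueError:
    print("not a model or string")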
3
11
def trigger(self, sender, event, data=None):
    if not self.figure.canvas.widgetlock.available(sender):
        return
    if data is not None:
        self.draw_rubberband(*data)
    else:
        self.remove_rubberband()
lib/matplotlib/backend_tools.py
82
matplotlib
{ "docstring": "Call `draw_rubberband` or `remove_rubberband` based on data.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
17
Python
15
d86a5050b57fc2f3f95d23d94f6c64f86dac2cd3
backend_tools.py
109,085
7
50
trigger
https://github.com/matplotlib/matplotlib.git
Fix method subclassing inconsistencies
78
0
23,431
11
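The dispatch is simple: bail out if another widget holds the canvas lock, then draw or clear the rubberband depending on whether `data` was supplied. A minimal hedged sketch of the same shape, independent of Matplotlib's tool classes:

# Hedged sketch of the trigger() dispatch pattern, with stand-in callbacks.
class RubberbandSketch:
    def __init__(self, lock_available=True):
        self._lock_available = lock_available  # stands in for widgetlock.available()

    def draw_rubberband(self, x0, y0, x1, y1):
        print(f"draw box ({x0}, {y0}) -> ({x1}, {y1})")

    def remove_rubberband(self):
        print("remove box")

    def trigger(self, sender, event, data=None):
        if not self._lock_available:
            return
        if data is not None:
            self.draw_rubberband(*data)
        else:
            self.remove_rubberband()


tool = RubberbandSketch()
tool.trigger(None, None, data=(0, 0, 10, 5))
tool.trigger(None, None)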
4
7
def default_device(dev=None):
    if ivy.exists(dev):
        _assert_dev_correct_formatting(dev)
        return dev
    global default_device_stack
    if not default_device_stack:
        default_device_stack = ['gpu:0'] if ivy.gpu_is_available() else ['cpu']
    return default_device_stack[-1]


# noinspection PyShadowingNames
ivy/core/device.py
86
ivy
{ "docstring": "\n Return the input dev if provided, otherwise return the global default device.\n ", "language": "en", "n_whitespaces": 19, "n_words": 12, "vocab_size": 11 }
24
Python
20
d743336b1f3654cd0315f380f43eed4116997c1d
device.py
213,600
8
49
default_device
https://github.com/unifyai/ivy.git
renamed dev_str arg to dev for all methods.
59
0
53,669
11
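A hedged usage sketch; the `dev` argument naming follows this older revision of the library, and the exact behavior may differ in current ivy releases.

# Hedged sketch against the API revision shown above.
import ivy

print(ivy.default_device())        # 'gpu:0' if a GPU is available, else 'cpu'
print(ivy.default_device("cpu"))   # an explicit dev string is validated and returned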
5
22
def save_optimizer_weights_to_hdf5_group(hdf5_group, optimizer):
    symbolic_weights = getattr(optimizer, "weights")
    if symbolic_weights:
        weights_group = hdf5_group.create_group("optimizer_weights")
        weight_names = [str(w.name).encode("utf8") for w in symbolic_weights]
        save_attributes_to_hdf5_group(
            weights_group, "weight_names", weight_names
        )
        weight_values = backend.batch_get_value(symbolic_weights)
        for name, val in zip(weight_names, weight_values):
            param_dset = weights_group.create_dataset(
                name, val.shape, dtype=val.dtype
            )
            if not val.shape:
                # scalar
                param_dset[()] = val
            else:
                param_dset[:] = val
keras/saving/hdf5_format.py
185
keras
{ "docstring": "Saves optimizer weights of a optimizer to a HDF5 group.\n\n Args:\n hdf5_group: HDF5 group.\n optimizer: optimizer instance.\n ", "language": "en", "n_whitespaces": 37, "n_words": 17, "vocab_size": 12 }
52
Python
38
84afc5193d38057e2e2badf9c889ea87d80d8fbf
hdf5_format.py
275,861
17
113
save_optimizer_weights_to_hdf5_group
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
218
0
81,486
15
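A hedged sketch of reading back the layout this helper writes, using plain h5py. The file name is a placeholder, and it assumes the name list was small enough to be stored under the single `weight_names` attribute (the attribute helper may chunk very large lists).

# Hedged sketch: inspect the "optimizer_weights" group written by the helper above.
import h5py

with h5py.File("weights.h5", "r") as f:  # placeholder file name
    group = f["optimizer_weights"]
    names = [n.decode("utf8") for n in group.attrs["weight_names"]]
    for name in names:
        print(name, group[name].shape, group[name].dtype)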
8
7
def powdenest(eq, force=False, polar=False):
    r
    from sympy.simplify.simplify import posify

    if force:
        # (remainder of the function body is not included in this record;
        # the raw-docstring marker `r` above is left where the docstring,
        # stored separately below, was stripped out)
sympy/simplify/powsimp.py
39
sympy
{ "docstring": "\n Collect exponents on powers as assumptions allow.\n\n Explanation\n ===========\n\n Given ``(bb**be)**e``, this can be simplified as follows:\n * if ``bb`` is positive, or\n * ``e`` is an integer, or\n * ``|be| < 1`` then this simplifies to ``bb**(be*e)``\n\n Given a product of powers raised to a power, ``(bb1**be1 *\n bb2**be2...)**e``, simplification can be done as follows:\n\n - if e is positive, the gcd of all bei can be joined with e;\n - all non-negative bb can be separated from those that are negative\n and their gcd can be joined with e; autosimplification already\n handles this separation.\n - integer factors from powers that have integers in the denominator\n of the exponent can be removed from any term and the gcd of such\n integers can be joined with e\n\n Setting ``force`` to ``True`` will make symbols that are not explicitly\n negative behave as though they are positive, resulting in more\n denesting.\n\n Setting ``polar`` to ``True`` will do simplifications on the Riemann surface of\n the logarithm, also resulting in more denestings.\n\n When there are sums of logs in exp() then a product of powers may be\n obtained e.g. ``exp(3*(log(a) + 2*log(b)))`` - > ``a**3*b**6``.\n\n Examples\n ========\n\n >>> from sympy.abc import a, b, x, y, z\n >>> from sympy import Symbol, exp, log, sqrt, symbols, powdenest\n\n >>> powdenest((x**(2*a/3))**(3*x))\n (x**(2*a/3))**(3*x)\n >>> powdenest(exp(3*x*log(2)))\n 2**(3*x)\n\n Assumptions may prevent expansion:\n\n >>> powdenest(sqrt(x**2))\n sqrt(x**2)\n\n >>> p = symbols('p', positive=True)\n >>> powdenest(sqrt(p**2))\n p\n\n No other expansion is done.\n\n >>> i, j = symbols('i,j', integer=True)\n >>> powdenest((x**x)**(i + j)) # -X-> (x**x)**i*(x**x)**j\n x**(x*(i + j))\n\n But exp() will be denested by moving all non-log terms outside of\n the function; this may result in the collapsing of the exp to a power\n with a different base:\n\n >>> powdenest(exp(3*y*log(x)))\n x**(3*y)\n >>> powdenest(exp(y*(log(a) + log(b))))\n (a*b)**y\n >>> powdenest(exp(3*(log(a) + log(b))))\n a**3*b**3\n\n If assumptions allow, symbols can also be moved to the outermost exponent:\n\n >>> i = Symbol('i', integer=True)\n >>> powdenest(((x**(2*i))**(3*y))**x)\n ((x**(2*i))**(3*y))**x\n >>> powdenest(((x**(2*i))**(3*y))**x, force=True)\n x**(6*i*x*y)\n\n >>> powdenest(((x**(2*a/3))**(3*y/i))**x)\n ((x**(2*a/3))**(3*y/i))**x\n >>> powdenest((x**(2*i)*y**(4*i))**z, force=True)\n (x*y**2)**(2*i*z)\n\n >>> n = Symbol('n', negative=True)\n\n >>> powdenest((x**i)**y, force=True)\n x**(i*y)\n >>> powdenest((n**i)**x, force=True)\n (n**i)**x\n\n ", "language": "en", "n_whitespaces": 560, "n_words": 341, "vocab_size": 199 }
11
Python
11
2a1afca9477eb781f16d5d6b63fa37abed7740a3
powsimp.py
198,305
110
182
powdenest
https://github.com/sympy/sympy.git
Use sympify less
19
0
48,865
6
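Usage examples taken directly from the docstring above (SymPy assumed installed):

from sympy import exp, log, sqrt, symbols, powdenest
from sympy.abc import a, b, x, y

print(powdenest(exp(3 * x * log(2))))         # 2**(3*x)
print(powdenest(sqrt(x**2)))                  # sqrt(x**2) - assumptions block denesting
p = symbols("p", positive=True)
print(powdenest(sqrt(p**2)))                  # p
print(powdenest(exp(y * (log(a) + log(b)))))  # (a*b)**y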
4
15
def set_anchor(self, anchor):
    if isinstance(anchor, str):
        _api.check_in_list(mtransforms.Bbox.coefs, anchor=anchor)
    elif not isinstance(anchor, (tuple, list)) or len(anchor) != 2:
        raise TypeError("anchor must be str or 2-tuple")
    self._anchor = anchor
lib/mpl_toolkits/axes_grid1/axes_divider.py
97
matplotlib
{ "docstring": "\n Parameters\n ----------\n anchor : (float, float) or {'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', \\\n'NW', 'W'}\n Either an (*x*, *y*) pair of relative coordinates (0 is left or\n bottom, 1 is right or top), 'C' (center), or a cardinal direction\n ('SW', southwest, is bottom left, etc.).\n\n See Also\n --------\n .Axes.set_anchor\n ", "language": "en", "n_whitespaces": 133, "n_words": 51, "vocab_size": 46 }
27
Python
25
e94dfed864a8bbeb215bab5705a490325ac07819
axes_divider.py
109,157
6
60
set_anchor
https://github.com/matplotlib/matplotlib.git
Improve argument checking
77
0
23,457
11
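A hedged sketch of the accepted anchor values, shown against `Axes.set_anchor`, which the docstring above points to as the analogous method; the axes-divider setup itself is omitted.

# Hedged sketch using Axes.set_anchor, which accepts the same values:
# a cardinal-direction string or an (x, y) pair of relative coordinates.
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.set_anchor("SW")        # cardinal-direction string
ax.set_anchor((0.5, 0.5))  # (x, y) pair

try:
    ax.set_anchor("middle")  # not in the allowed list
except ValueError as err:
    print(err)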
3
5
def parsedate_tz(data):
    res = _parsedate_tz(data)
    if not res:
        return
    if res[9] is None:
        res[9] = 0
    return tuple(res)
python3.10.4/Lib/email/_parseaddr.py
61
XX-Net
{ "docstring": "Convert a date string to a time tuple.\n\n Accounts for military timezones.\n ", "language": "en", "n_whitespaces": 18, "n_words": 12, "vocab_size": 11 }
18
Python
14
8198943edd73a363c266633e1aa5b2a9e9c9f526
_parseaddr.py
223,619
7
36
parsedate_tz
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
47
0
57,010
9
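A usage sketch via the public `email.utils` wrapper, which re-exports this helper in the standard library:

from email.utils import parsedate_tz, mktime_tz

parsed = parsedate_tz("Mon, 20 Nov 1995 19:12:08 -0500")
print(parsed)             # (1995, 11, 20, 19, 12, 8, 0, 1, -1, -18000)
print(mktime_tz(parsed))  # POSIX timestamp for that instant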