Columns (name: dtype, observed value range or string-length range):

complexity: int64, 1 to 56
n_identifiers: int64, 1 to 114
code: string, lengths 19 to 12.7k
path: string, lengths 8 to 134
n_ast_nodes: int64, 12 to 2.35k
ast_errors: string, lengths 0 to 4.01k
repo: string, lengths 3 to 28
documentation: dict
n_words: int64, 2 to 866
language: string, 1 distinct value
vocab_size: int64, 2 to 323
commit_id: string, lengths 40 to 40
file_name: string, lengths 5 to 79
id: int64, 243 to 338k
nloc: int64, 1 to 228
token_counts: int64, 5 to 1.4k
fun_name: string, lengths 1 to 77
url: string, lengths 31 to 60
commit_message: string, lengths 3 to 15.3k
n_whitespaces: int64, 1 to 3.23k
n_ast_errors: int64, 0 to 20
d_id: int64, 74 to 121k
ast_levels: int64, 4 to 29
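The records below appear to follow this schema, serialized one field per line in column order (empty ast_errors values are simply skipped). As a minimal sketch of how records of this shape could be queried once loaded (the small hand-built DataFrame is illustrative only; its values are copied from two of the rows shown below, and no particular loading API is assumed):

import pandas as pd

# Hypothetical illustration: two of the records below, reduced to a few of
# the schema's columns and entered by hand.
records = [
    {"repo": "OpenBBTerminal", "fun_name": "functions_df", "language": "Python",
     "complexity": 10, "nloc": 32, "token_counts": 169},
    {"repo": "freqtrade", "fun_name": "train_timer", "language": "Python",
     "complexity": 4, "nloc": 13, "token_counts": 79},
]
df = pd.DataFrame(records)

# Example query: functions at or above a complexity threshold, largest first.
print(df[df["complexity"] >= 5]
      .sort_values("complexity", ascending=False)
      [["repo", "fun_name", "complexity", "nloc", "token_counts"]])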
10
26
def functions_df() -> pd.DataFrame:
    modules = all_view_models()
    all_formatted = []
    for module in modules:
        if not FORECASTING and "forecast" in str(module):
            continue
        loaded = load_modules(module)
        # Gets all of a module's functions, but ignores imported functions
        func_list = [
            x[1]
            for x in getmembers(loaded, isfunction)
            if x[1].__module__ == loaded.__name__
        ]
        formatted_list = [format_function(x) for x in func_list]
        all_formatted.extend(formatted_list)
    func_df = pd.DataFrame()
    func_df["name"] = [x[0] for x in all_formatted]
    func_df["docstring"] = [x[1] for x in all_formatted]
    func_dups = len(func_df["name"]) - len(func_df["name"].drop_duplicates())
    if func_dups > 0:
        print(f"Number of duplicate functions found: {func_dups}")
        print(
            "This may indicate that functions are defined several times in the terminal.\n"
        )
    func_df = func_df.set_index("name")
    return func_df
openbb_terminal/core/scripts/sdk_audit.py
286
OpenBBTerminal
{ "docstring": "Creates a dataframe for all functions in 'models' and 'views'.\n\n Returns:\n ----------\n pd.DataFrame\n Information for all view and model functions\n ", "language": "en", "n_whitespaces": 39, "n_words": 20, "vocab_size": 16 }
109
Python
78
963ca9b2b924d0514e0e65243dc8d9d7af023ad1
sdk_audit.py
286,657
32
169
functions_df
https://github.com/OpenBB-finance/OpenBBTerminal.git
Audit SDK and View/Model functions (#3384) * Initial commit * Finalized functionality * update script * Allow using it without forecasting * Update gitignore * Update `sdk_audit.py` * Fixed issues, found more * Added fix for helper functions, and column for SDK type * Checked one more thing * Moved file * Move files ending with models/views * Added fix of name * Added file path fixes * Patch to fix sdk_audit for windows * fix Co-authored-by: Chavithra PARANA <[email protected]>
267
0
85,962
13
4
11
def train_timer(self, do='start'):
    if do == 'start':
        self.pair_it_train += 1
        self.begin_time_train = time.time()
    elif do == 'stop':
        end = time.time()
        self.train_time += (end - self.begin_time_train)
        if self.pair_it_train == self.total_pairs:
            logger.info(
                f'Total time spent training pairlist {self.train_time:.2f} seconds')
            self.pair_it_train = 0
            self.train_time = 0
    return

# Following methods which are overridden by user made prediction models.
# See freqai/prediction_models/CatboostPredictionModel.py for an example.
freqtrade/freqai/freqai_interface.py
147
freqtrade
{ "docstring": "\n Timer designed to track the cumulative time spent training the full pairlist in\n FreqAI.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
61
Python
47
96d8882f1e6740f6c0a859c6e5f52a5a30ddb007
freqai_interface.py
150,527
13
79
train_timer
https://github.com/freqtrade/freqtrade.git
Plug mem leak, add training timer
214
0
34,788
15
5
17
def caching_device(rnn_cell):
    if tf.executing_eagerly():
        # caching_device is not supported in eager mode.
        return None
    if not getattr(rnn_cell, '_enable_caching_device', False):
        return None
    # Don't set a caching device when running in a loop, since it is possible that
    # train steps could be wrapped in a tf.while_loop. In that scenario caching
    # prevents forward computations in loop iterations from re-reading the
    # updated weights.
    if control_flow_util.IsInWhileLoop(tf.compat.v1.get_default_graph()):
        logging.warning(
            'Variable read device caching has been disabled because the '
            'RNN is in tf.while_loop loop context, which will cause '
            'reading stalled value in forward path. This could slow down '
            'the training due to duplicated variable reads. Please '
            'consider updating your code to remove tf.while_loop if possible.')
        return None
    if (rnn_cell._dtype_policy.compute_dtype !=
            rnn_cell._dtype_policy.variable_dtype):
        logging.warning(
            'Variable read device caching has been disabled since it '
            'doesn\'t work with the mixed precision API. This is '
            'likely to cause a slowdown for RNN training due to '
            'duplicated read of variable for each timestep, which '
            'will be significant in a multi remote worker setting. '
            'Please consider disabling mixed precision API if '
            'the performance has been affected.')
        return None
    # Cache the value on the device that access the variable.
    return lambda op: op.device
keras/layers/rnn/rnn_utils.py
178
keras
{ "docstring": "Returns the caching device for the RNN variable.\n\n This is useful for distributed training, when variable is not located as same\n device as the training worker. By enabling the device cache, this allows\n worker to read the variable once and cache locally, rather than read it every\n time step from remote when it is needed.\n\n Note that this is assuming the variable that cell needs for each time step is\n having the same value in the forward path, and only gets updated in the\n backprop. It is true for all the default cells (SimpleRNN, GRU, LSTM). If the\n cell body relies on any variable that gets updated every time step, then\n caching device will cause it to read the stall value.\n\n Args:\n rnn_cell: the rnn cell instance.\n ", "language": "en", "n_whitespaces": 141, "n_words": 127, "vocab_size": 79 }
202
Python
119
01c906c4178db5ae03b7eb2d298a052c952a0667
rnn_utils.py
268,981
25
92
caching_device
https://github.com/keras-team/keras.git
Reorganize RNN layers, cells and wrappers into smaller logically organized files hosted under an `rnn` directory. PiperOrigin-RevId: 428841673
323
0
79,802
11
1
5
def get_tables(self) -> Response:
    q = 'SHOW TABLES;'
    return self.native_query(q)
mindsdb/integrations/handlers/tdengine_handler/tdengine_handler.py
34
mindsdb
{ "docstring": "\n Get a list with all of the tabels in TDEngine\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
10
Python
10
257dfe6bac18d28088c7bfc79ca22cde682f9cd6
tdengine_handler.py
116,966
6
18
get_tables
https://github.com/mindsdb/mindsdb.git
Added TDENgine Handler
40
0
25,874
7
3
9
def get_instance(self) -> t.Optional[AnsibleCoreCI]:
    if not self.core_ci and self.core_ci_state:
        self.core_ci = self.create_core_ci(load=False)
        self.core_ci.load(self.core_ci_state)
    return self.core_ci
test/lib/ansible_test/_internal/host_profiles.py
80
ansible
{ "docstring": "Return the current AnsibleCoreCI instance, loading it if not already loaded.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
15
Python
13
3eb0485dd92c88cc92152d3656d94492db44b183
host_profiles.py
268,016
6
49
get_instance
https://github.com/ansible/ansible.git
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
58
0
79,290
11
3
14
def sort_line_bbox(g, bg):
    xs = [bg_item[0] for bg_item in bg]
    xs_sorted = sorted(xs)

    g_sorted = [None] * len(xs_sorted)
    bg_sorted = [None] * len(xs_sorted)
    for g_item, bg_item in zip(g, bg):
        idx = xs_sorted.index(bg_item[0])
        bg_sorted[idx] = bg_item
        g_sorted[idx] = g_item

    return g_sorted, bg_sorted
ppstructure/table/table_master_match.py
132
PaddleOCR
{ "docstring": "\n Sorted the bbox in the same line(group)\n compare coord 'x' value, where 'y' value is closed in the same group.\n :param g: index in the same group\n :param bg: bbox in the same group\n :return:\n ", "language": "en", "n_whitespaces": 54, "n_words": 35, "vocab_size": 22 }
41
Python
26
ddaa2c2552e19635cd6cdf38619f1f176c358f89
table_master_match.py
24,491
10
85
sort_line_bbox
https://github.com/PaddlePaddle/PaddleOCR.git
add SLANet
83
0
4,742
11
2
15
def get_closed() -> pd.DataFrame:
    bursa = all_bursa()
    is_open_list = []
    for exchange in bursa.index:
        is_open = check_if_open(bursa, exchange)
        is_open_list.append(is_open)
    bursa["open"] = is_open_list
    bursa = bursa.loc[~bursa["open"]]
    return bursa[["name", "short_name"]]

@log_start_end(log=logger)
openbb_terminal/stocks/tradinghours/bursa_model.py
125
@log_start_end(log=logger)
OpenBBTerminal
{ "docstring": "Get closed exchanges.\n\n Parameters\n ----------\n\n Returns\n -------\n pd.DataFrame\n Currently closed exchanges\n ", "language": "en", "n_whitespaces": 36, "n_words": 11, "vocab_size": 10 }
29
Python
23
33a041e5bf93ce93ab1a19adbc5ed74c2f1eb337
bursa_model.py
284,462
19
66
get_closed
https://github.com/OpenBB-finance/OpenBBTerminal.git
Trading hours stock feature (#1697)
63
1
84,732
10
4
20
def get_masks(slen, lengths, causal, padding_mask=None):
    bs = shape_list(lengths)[0]
    if padding_mask is not None:
        mask = padding_mask
    else:
        # assert lengths.max().item() <= slen
        alen = tf.range(slen, dtype=lengths.dtype)
        mask = alen < tf.expand_dims(lengths, axis=1)

    # attention mask is the same as mask, or triangular inferior attention (causal)
    if causal:
        attn_mask = tf.less_equal(
            tf.tile(tf.reshape(alen, (1, 1, slen)), (bs, slen, 1)), tf.reshape(alen, (1, slen, 1))
        )
    else:
        attn_mask = mask

    # sanity check
    # assert shape_list(mask) == [bs, slen]
    tf.debugging.assert_equal(shape_list(mask), [bs, slen])
    if causal:
        tf.debugging.assert_equal(shape_list(attn_mask), [bs, slen, slen])

    return mask, attn_mask
src/transformers/models/flaubert/modeling_tf_flaubert.py
243
transformers
{ "docstring": "\n Generate hidden states mask, and optionally an attention mask.\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
87
Python
58
31be02f14b1724c677bb2e32a5101c7cb6448556
modeling_tf_flaubert.py
33,795
17
162
get_masks
https://github.com/huggingface/transformers.git
TF: tf.debugging assertions without tf.running_eagerly() protection (#19030)
190
0
6,152
15
2
19
def css_install_check(app_configs, **kwargs):
    errors = []

    css_path = os.path.join(
        os.path.dirname(__file__), 'static', 'wagtailadmin', 'css', 'normalize.css'
    )

    if not os.path.isfile(css_path):
        error_hint = % css_path

        errors.append(
            Warning(
                "CSS for the Wagtail admin is missing",
                hint=error_hint,
                id='wagtailadmin.W001',
            )
        )
    return errors

@register(Tags.admin)
wagtail/admin/checks.py
136
@register(Tags.admin)
wagtail
{ "docstring": "\n Most likely you are running a development (non-packaged) copy of\n Wagtail and have not built the static assets -\n see https://docs.wagtail.org/en/latest/contributing/developing.html\n\n File not found: %s\n ", "language": "en", "n_whitespaces": 77, "n_words": 25, "vocab_size": 24 }
38
Python
32
e9183a95c88fe2eaf4c1d3aff9833633509713f3
checks.py
70,553
21
73
css_install_check
https://github.com/wagtail/wagtail.git
Update docs links to reference new domain
147
1
15,519
13
4
4
async def device_scan(hass, identifier, loop):
homeassistant/components/apple_tv/config_flow.py
18
core
{ "docstring": "Scan for a specific device using identifier as filter.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
5
Python
5
7112c5b52a1e0016961a725d4ca90b57ddb350de
config_flow.py
310,881
9
77
device_scan
https://github.com/home-assistant/core.git
Use zeroconf for scanning in apple_tv (#64528)
8
0
109,552
6
1
6
def __exit__(self, *args) -> None:
    raise NotImplementedError(
        f"{self.__class__.__name__} does not support context management."
    )
src/prefect/blocks/abstract.py
40
prefect
{ "docstring": "\n Context management method for databases.\n ", "language": "en", "n_whitespaces": 20, "n_words": 5, "vocab_size": 5 }
14
Python
14
e51b790b7717e2603c1ea480c75e9ee02df3c869
abstract.py
60,156
7
17
__exit__
https://github.com/PrefectHQ/prefect.git
Abstract database block (#7866) Co-authored-by: Alexander Streed <[email protected]> Co-authored-by: Bill Palombi <[email protected]>
46
0
11,998
11
2
31
def split_spectrum(H, n, split_point, V0=None):
  N, _ = H.shape
  H_shift = H - split_point * jnp.eye(N, dtype=H.dtype)
  U, _, _, _ = qdwh.qdwh(H_shift, is_hermitian=True, dynamic_shape=(n, n))
  P = -0.5 * (U - _mask(jnp.eye(N, dtype=H.dtype), (n, n)))
  rank = jnp.round(jnp.trace(P)).astype(jnp.int32)

  V_minus, V_plus = _projector_subspace(P, H, n, rank)
  H_minus = (V_minus.conj().T @ H) @ V_minus
  H_plus = (V_plus.conj().T @ H) @ V_plus
  if V0 is not None:
    V_minus = jnp.dot(V0, V_minus)
    V_plus = jnp.dot(V0, V_plus)
  return H_minus, V_minus, H_plus, V_plus, rank

# To help understand the iterative version of the algorithm, the original
# recursive formulation follows.
#
# def _eigh_work(H, V=None, termination_size=128):
#
#   if H.shape[0] <= termination_size:
#     evals, evecs = jnp_linalg.eigh(H)
#     if V is not None:
#       evecs = jnp.dot(V, evecs)
#     return evals, evecs
#
#   split_point = jnp.median(jnp.diag(H))  # TODO: Improve this?
#   H_minus, V_minus, H_plus, V_plus = split_spectrum(H, split_point, V0=V)
#   H_minus, V_minus = _eigh_work(H_minus, V=V_minus, termination_size=termination_size)
#   H_plus, V_plus = _eigh_work(H_plus, V=V_plus, termination_size=termination_size)
#
#   evals = jnp.hstack((H_minus, H_plus))
#   evecs = jnp.hstack((V_minus, V_plus))
#   return evals, evecs
jax/_src/lax/eigh.py
323
jax
{ "docstring": " The Hermitian matrix `H` is split into two matrices `H_minus`\n `H_plus`, respectively sharing its eigenspaces beneath and above\n its `split_point`th eigenvalue.\n\n Returns, in addition, `V_minus` and `V_plus`, isometries such that\n `Hi = Vi.conj().T @ H @ Vi`. If `V0` is not None, `V0 @ Vi` are\n returned instead; this allows the overall isometries mapping from\n an initial input matrix to progressively smaller blocks to be formed.\n\n Args:\n H: The Hermitian matrix to split.\n split_point: The eigenvalue to split along.\n V0: Matrix of isometries to be updated.\n Returns:\n H_minus: A Hermitian matrix sharing the eigenvalues of `H` beneath\n `split_point`.\n V_minus: An isometry from the input space of `V0` to `H_minus`.\n H_plus: A Hermitian matrix sharing the eigenvalues of `H` above\n `split_point`.\n V_plus: An isometry from the input space of `V0` to `H_plus`.\n rank: The dynamic size of the m subblock.\n The main work loop performing the symmetric eigendecomposition of H.\n# Each step recursively computes a projector into the space of eigenvalues\n# above jnp.mean(jnp.diag(H)). The result of the projections into and out of\n# that space, along with the isometries accomplishing these, are then computed.\n# This is performed recursively until the projections have size 1, and thus\n# store an eigenvalue of the original input; the corresponding isometry is\n# the related eigenvector. The results are then composed.\n#\n# Args:\n# H: The Hermitian input.\n# V: Stores the isometries projecting H into its subspaces.\n# precision: :class:`~jax.lax.Precision` object specifying the matmul precision.\n#\n# Returns:\n# H, V: The result of the projection.\n# ", "language": "en", "n_whitespaces": 321, "n_words": 257, "vocab_size": 138 }
174
Python
95
b64e36b60fca9661ca2c8ae51a56fae07bf5efe6
eigh.py
120,630
13
197
split_spectrum
https://github.com/google/jax.git
Make QDWH-eig implementation jit-table. Move QDWH-eig from jax._src.scipy.eigh to jax._src.lax.eigh, in preparation for using it to back `lax.eigh` in a future change. PiperOrigin-RevId: 449362382
210
0
26,905
15
2
7
def set_dash_joinstyle(self, s):
    js = JoinStyle(s)
    if self._dashjoinstyle != js:
        self.stale = True
    self._dashjoinstyle = js
lib/matplotlib/lines.py
52
matplotlib
{ "docstring": "\n How to join segments of the line if it `~Line2D.is_dashed`.\n\n The default joinstyle is :rc:`lines.dash_joinstyle`.\n\n Parameters\n ----------\n s : `.JoinStyle` or %(JoinStyle)s\n ", "language": "en", "n_whitespaces": 65, "n_words": 22, "vocab_size": 22 }
16
Python
12
4c2662ad6f8c7b3c06554dfa3633f50dd011beb0
lines.py
107,221
5
31
set_dash_joinstyle
https://github.com/matplotlib/matplotlib.git
DOC: Document default join style in the same way as the default cap styles.
55
0
22,649
9
4
22
def set_omp_num_threads_if_unset() -> bool:
    num_threads_from_env = os.environ.get("OMP_NUM_THREADS")
    if num_threads_from_env is not None:
        # No ops if it's set
        return False

    # If unset, try setting the correct CPU count assigned.
    runtime_ctx = ray.get_runtime_context()
    if runtime_ctx.worker.mode != ray._private.worker.WORKER_MODE:
        # Non worker mode, no ops.
        return False

    num_assigned_cpus = runtime_ctx.get_assigned_resources().get("CPU")
    if num_assigned_cpus is None:
        # This is an actor task w/o any num_cpus specified, set it to 1
        logger.debug(
            "[ray] Forcing OMP_NUM_THREADS=1 to avoid performance "
            "degradation with many workers (issue #6998). You can override this "
            "by explicitly setting OMP_NUM_THREADS, or changing num_cpus."
        )
        num_assigned_cpus = 1

    import math

    # For num_cpu < 1: Set to 1.
    # For num_cpus >= 1: Set to the floor of the actual assigned cpus.
    omp_num_threads = max(math.floor(num_assigned_cpus), 1)
    os.environ["OMP_NUM_THREADS"] = str(omp_num_threads)
    return True
python/ray/_private/utils.py
189
ray
{ "docstring": "Set the OMP_NUM_THREADS to default to num cpus assigned to the worker\n\n This function sets the environment variable OMP_NUM_THREADS for the worker,\n if the env is not previously set and it's running in worker (WORKER_MODE).\n\n Returns True if OMP_NUM_THREADS is set in this function.\n\n ", "language": "en", "n_whitespaces": 56, "n_words": 44, "vocab_size": 31 }
129
Python
94
7c8859f1428224710e4c2db2abf0d9ec28536301
utils.py
136,708
27
105
set_omp_num_threads_if_unset
https://github.com/ray-project/ray.git
[core] Set OMP_NUM_THREADS to `num_cpus` required by task/actors by default (#30496) Ray currently sets OMP_NUM_THREADS=1 when the environ variable is not set. This PR: Sets OMP_NUM_THREADS to the number of cpus assigned to the worker that runs a task before running, and reset it after running. If num_cpus is a fractional smaller than 1, it will set OMP_NUM_THREADS to 1. Doesn't override OMP_NUM_THREADS if it's already being specified in runtime env or through os.environ. Signed-off-by: Ricky Xu <[email protected]> Co-authored-by: Eric Liang <[email protected]> Co-authored-by: Simon Mo <[email protected]>
260
0
30,974
11
4
31
def grant_instance_level_collection_management_permissions(apps, schema_editor):
    Collection = apps.get_model("wagtailcore.Collection")
    Group = apps.get_model("auth.Group")
    GroupCollectionPermission = apps.get_model("wagtailcore.GroupCollectionPermission")
    Permission = apps.get_model("auth.Permission")

    groups_w_permissions = Group.objects.filter(
        permissions__content_type__app_label="wagtailcore",
        permissions__content_type__model="collection",
        permissions__codename__in=[
            "add_collection",
            "change_collection",
            "delete_collection",
        ],
    ).values("id", "name", "permissions__id", "permissions__codename")

    for root_collection in Collection.objects.filter(depth=1).all():
        for row in groups_w_permissions:
            GroupCollectionPermission.objects.create(
                group_id=row["id"],
                permission_id=row["permissions__id"],
                collection_id=root_collection.id,
            )
    # Now remove the model-level permissions for collections
    collection_permissions = Permission.objects.filter(
        content_type__app_label="wagtailcore",
        content_type__model="collection",
        codename__in=["add_collection", "change_collection", "delete_collection"],
    )
    for perm in collection_permissions.all():
        perm.group_set.clear()
wagtail/core/migrations/0066_collection_management_permissions.py
296
wagtail
{ "docstring": "\n Give the groups who currently manage all collections permission to manage root collections\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 11 }
64
Python
52
d10f15e55806c6944827d801cd9c2d53f5da4186
0066_collection_management_permissions.py
73,773
28
172
grant_instance_level_collection_management_permissions
https://github.com/wagtail/wagtail.git
Reformat with black
263
0
16,100
14
1
9
def test_no_default_policy(self) -> None:
    room_id = self.helper.create_room_as(self.user_id, tok=self.token)
    self._test_retention(room_id)
tests/rest/client/test_retention.py
53
synapse
{ "docstring": "Tests that an event doesn't get expired if there is neither a default retention\n policy nor a policy specific to the room.\n ", "language": "en", "n_whitespaces": 36, "n_words": 22, "vocab_size": 20 }
9
Python
9
1901cb1d4a8b7d9af64493fbd336e9aa2561c20c
test_retention.py
247,062
6
32
test_no_default_policy
https://github.com/matrix-org/synapse.git
Add type hints to `tests/rest/client` (#12084)
30
0
71,472
10
2
36
def dag_bag_ext():
    clear_db_runs()

    dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)

    dag_0 = DAG("dag_0", start_date=DEFAULT_DATE, schedule_interval=None)
    task_a_0 = EmptyOperator(task_id="task_a_0", dag=dag_0)
    task_b_0 = ExternalTaskMarker(
        task_id="task_b_0", external_dag_id="dag_1", external_task_id="task_a_1", recursion_depth=3, dag=dag_0
    )
    task_a_0 >> task_b_0

    dag_1 = DAG("dag_1", start_date=DEFAULT_DATE, schedule_interval=None)
    task_a_1 = ExternalTaskSensor(
        task_id="task_a_1", external_dag_id=dag_0.dag_id, external_task_id=task_b_0.task_id, dag=dag_1
    )
    task_b_1 = ExternalTaskMarker(
        task_id="task_b_1", external_dag_id="dag_2", external_task_id="task_a_2", recursion_depth=2, dag=dag_1
    )
    task_a_1 >> task_b_1

    dag_2 = DAG("dag_2", start_date=DEFAULT_DATE, schedule_interval=None)
    task_a_2 = ExternalTaskSensor(
        task_id="task_a_2", external_dag_id=dag_1.dag_id, external_task_id=task_b_1.task_id, dag=dag_2
    )
    task_b_2 = ExternalTaskMarker(
        task_id="task_b_2", external_dag_id="dag_3", external_task_id="task_a_3", recursion_depth=1, dag=dag_2
    )
    task_a_2 >> task_b_2

    dag_3 = DAG("dag_3", start_date=DEFAULT_DATE, schedule_interval=None)
    task_a_3 = ExternalTaskSensor(
        task_id="task_a_3", external_dag_id=dag_2.dag_id, external_task_id=task_b_2.task_id, dag=dag_3
    )
    task_b_3 = EmptyOperator(task_id="task_b_3", dag=dag_3)
    task_a_3 >> task_b_3

    for dag in [dag_0, dag_1, dag_2, dag_3]:
        dag_bag.bag_dag(dag=dag, root_dag=dag)

    yield dag_bag

    clear_db_runs()

@pytest.fixture
tests/sensors/test_external_task_sensor.py
460
@pytest.fixture
airflow
{ "docstring": "\n Create a DagBag with DAGs looking like this. The dotted lines represent external dependencies\n set up using ExternalTaskMarker and ExternalTaskSensor.\n\n dag_0: task_a_0 >> task_b_0\n |\n |\n dag_1: ---> task_a_1 >> task_b_1\n |\n |\n dag_2: ---> task_a_2 >> task_b_2\n |\n |\n dag_3: ---> task_a_3 >> task_b_3\n ", "language": "en", "n_whitespaces": 480, "n_words": 45, "vocab_size": 35 }
111
Python
69
49e336ae0302b386a2f47269a6d13988382d975f
test_external_task_sensor.py
47,660
35
290
dag_bag_ext
https://github.com/apache/airflow.git
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
243
1
9,197
10
15
14
def deserialize_keras_object(config, custom_objects=None):
keras/saving/experimental/serialization_lib.py
49
"""Retrieve the object by deserializing the config dict. The config dict is a Python dictionary that consists of a set of key-value pairs, and represents a Keras object, such as an `Optimizer`, `Layer`, `Metrics`, etc. The saving and loading library uses the following keys to record information of a Keras object: - `class_name`: String. This is the name of the class, as exactly defined in the source code, such as "LossesContainer". - `config`: Dict. Library-defined or user-defined key-value pairs that store the configuration of the object, as obtained by `object.get_config()`. - `module`: String. The path of the python module, such as "keras.engine.compile_utils". Built-in Keras classes expect to have prefix `keras`. - `registered_name`: String. The key the class is registered under via `keras.utils.register_keras_serializable(package, name)` API. The key has the format of '{package}>{name}', where `package` and `name` are the arguments passed to `register_keras_serializable()`. If `name` is not provided, it defaults to the class name. If `registered_name` successfully resolves to a class (that was registered), the `class_name` and `config` values in the dict will not be used. `registered_name` is only used for non-built-in classes. For example, the following dictionary represents the built-in Adam optimizer with the relevant config: ```python dict_structure = { "class_name": "Adam", "config": { "amsgrad": false, "beta_1": 0.8999999761581421, "beta_2": 0.9990000128746033, "decay": 0.0, "epsilon": 1e-07, "learning_rate": 0.0010000000474974513, "name": "Adam" }, "module": "keras.optimizers", "registered_name": None } # Returns an `Adam` instance identical to the original one. deserialize_keras_object(dict_structure) ``` If the class does not have an exported Keras namespace, the library tracks it by its `module` and `class_name`. For example: ```python dict_structure = { "class_name": "LossesContainer", "config": { "losses": [...], "total_loss_mean": {...}, }, "module": "keras.engine.compile_utils", "registered_name": "LossesContainer" } # Returns a `LossesContainer` instance identical to the original one. deserialize_keras_object(dict_structure) ``` And the following dictionary represents a user-customized````
keras
{ "docstring": "Retrieve the object by deserializing the config dict.\n\n The config dict is a Python dictionary that consists of a set of key-value\n pairs, and represents a Keras object, such as an `Optimizer`, `Layer`,\n `Metrics`, etc. The saving and loading library uses the following keys to\n record information of a Keras object:\n\n - `class_name`: String. This is the name of the class,\n as exactly defined in the source\n code, such as \"LossesContainer\".\n - `config`: Dict. Library-defined or user-defined key-value pairs that store\n the configuration of the object, as obtained by `object.get_config()`.\n - `module`: String. The path of the python module, such as\n \"keras.engine.compile_utils\". Built-in Keras classes\n expect to have prefix `keras`.\n - `registered_name`: String. The key the class is registered under via\n `keras.utils.register_keras_serializable(package, name)` API. The key has\n the format of '{package}>{name}', where `package` and `name` are the\n arguments passed to `register_keras_serializable()`. If `name` is not\n provided, it defaults to the class name. If `registered_name` successfully\n resolves to a class (that was registered), the `class_name` and `config`\n values in the dict will not be used. `registered_name` is only used for\n non-built-in classes.\n\n For example, the following dictionary represents the built-in Adam optimizer\n with the relevant config:\n\n ```python\n dict_structure = {\n \"class_name\": \"Adam\",\n \"config\": {\n \"amsgrad\": false,\n \"beta_1\": 0.8999999761581421,\n \"beta_2\": 0.9990000128746033,\n \"decay\": 0.0,\n \"epsilon\": 1e-07,\n \"learning_rate\": 0.0010000000474974513,\n \"name\": \"Adam\"\n },\n \"module\": \"keras.optimizers\",\n \"registered_name\": None\n }\n # Returns an `Adam` instance identical to the original one.\n deserialize_keras_object(dict_structure)\n ```\n\n If the class does not have an exported Keras namespace, the library tracks\n it by its `module` and `class_name`. For example:\n\n ```python\n dict_structure = {\n \"class_name\": \"LossesContainer\",\n \"config\": {\n \"losses\": [...],\n \"total_loss_mean\": {...},\n },\n \"module\": \"keras.engine.compile_utils\",\n \"registered_name\": \"LossesContainer\"\n }\n\n # Returns a `LossesContainer` instance identical to the original one.\n deserialize_keras_object(dict_structure)\n ```\n\n And the following dictionary represents a user-customized `MeanSquaredError`\n loss:\n\n ```python", "language": "en", "n_whitespaces": 591, "n_words": 296, "vocab_size": 179 }
3
Python
3
e3e3a428f0a7955040c8a8fb8b2ad6f3e16d29eb
serialization_lib.py
279,741
54
281
deserialize_keras_object
https://github.com/keras-team/keras.git
Remaster serialization logic. There were several significant flaws, most prominently: - We had 2 separate serialization systems partially overlapping and interacting with each other: the JSON encoder/decoder one, and serialize/deserialize_keras_objects. The new system is fully standalone. - We ignored objects passed via `custom_objects` most of the time. PiperOrigin-RevId: 473794783
6
3
83,118
8
9
41
def from_arrow(cls, at, index_cols=None, index=None, columns=None):
    (
        new_frame,
        new_lengths,
        new_widths,
        unsupported_cols,
    ) = cls._partition_mgr_cls.from_arrow(at, return_dims=True)

    if columns is not None:
        new_columns = columns
        new_index = pd.RangeIndex(at.num_rows) if index is None else index
    elif index_cols:
        data_cols = [col for col in at.column_names if col not in index_cols]
        new_columns = pd.Index(data=data_cols, dtype="O")
        new_index = index
    else:
        assert index is None
        new_columns = pd.Index(data=at.column_names, dtype="O")
        new_index = pd.RangeIndex(at.num_rows)

    new_dtypes = []
    for col in at.columns:
        if pyarrow.types.is_dictionary(col.type):
            new_dtypes.append(LazyProxyCategoricalDtype(at, col._name))
        else:
            new_dtypes.append(cls._arrow_type_to_dtype(col.type))

    if len(unsupported_cols) > 0:
        ErrorMessage.single_warning(
            f"Frame contain columns with unsupported data-types: {unsupported_cols}. "
            + "All operations with this frame will be default to pandas!"
        )

    return cls(
        partitions=new_frame,
        index=new_index,
        columns=new_columns,
        row_lengths=new_lengths,
        column_widths=new_widths,
        dtypes=pd.Series(data=new_dtypes, index=at.column_names),
        index_cols=index_cols,
        has_unsupported_data=len(unsupported_cols) > 0,
    )
modin/experimental/core/execution/native/implementations/hdk_on_native/dataframe/dataframe.py
396
modin
{ "docstring": "\n Build a frame from an Arrow table.\n\n Parameters\n ----------\n at : pyarrow.Table\n Source table.\n index_cols : list of str, optional\n List of index columns in the source table which\n are ignored in transformation.\n index : pandas.Index, optional\n An index to be used by the new frame. Should present\n if `index_cols` is not None.\n columns : Index or array-like, optional\n Column labels to use for resulting frame.\n\n Returns\n -------\n HdkOnNativeDataframe\n The new frame.\n ", "language": "en", "n_whitespaces": 227, "n_words": 72, "vocab_size": 56 }
117
Python
80
219edb5fb772609d3fafaac02ded0294ea434aa8
dataframe.py
155,368
39
258
from_arrow
https://github.com/modin-project/modin.git
FIX-#4859: Add support for PyArrow Dictionary Arrays to type mapping (#5271) Co-authored-by: Iaroslav Igoshev <[email protected]> Signed-off-by: Andrey Pavlenko <[email protected]>
518
0
36,363
15
1
16
def test_retention_event_purged_with_state_event_outside_allowed(self) -> None:
    room_id = self.helper.create_room_as(self.user_id, tok=self.token)

    # Set a max_lifetime higher than the maximum allowed value.
    self.helper.send_state(
        room_id=room_id,
        event_type=EventTypes.Retention,
        body={"max_lifetime": one_day_ms * 4},
        tok=self.token,
    )

    # Check that the event is purged after waiting for the maximum allowed duration
    # instead of the one specified in the room's policy.
    self._test_retention_event_purged(room_id, one_day_ms * 1.5)

    # Set a max_lifetime lower than the minimum allowed value.
    self.helper.send_state(
        room_id=room_id,
        event_type=EventTypes.Retention,
        body={"max_lifetime": one_hour_ms},
        tok=self.token,
    )

    # Check that the event is purged after waiting for the minimum allowed duration
    # instead of the one specified in the room's policy.
    self._test_retention_event_purged(room_id, one_day_ms * 0.5)
tests/rest/client/test_retention.py
174
synapse
{ "docstring": "Tests that the server configuration can override the policy for a room when\n running the purge jobs.\n ", "language": "en", "n_whitespaces": 31, "n_words": 17, "vocab_size": 15 }
100
Python
49
1901cb1d4a8b7d9af64493fbd336e9aa2561c20c
test_retention.py
247,058
19
114
test_retention_event_purged_with_state_event_outside_allowed
https://github.com/matrix-org/synapse.git
Add type hints to `tests/rest/client` (#12084)
286
0
71,468
11
2
7
def raise_for_status(self) -> None:
    try:
        return super().raise_for_status()
    except HTTPStatusError as exc:
        raise PrefectHTTPStatusError.from_httpx_error(exc) from None
src/prefect/client.py
56
prefect
{ "docstring": "\n Raise an exception if the response contains an HTTPStatusError.\n\n The `PrefectHTTPStatusError` contains useful additional information that\n is not contained in the `HTTPStatusError`.\n ", "language": "en", "n_whitespaces": 51, "n_words": 22, "vocab_size": 19 }
15
Python
15
f166d70fcfcdf4fceeb222f273b8e0eab6fb1b26
client.py
55,891
11
32
raise_for_status
https://github.com/PrefectHQ/prefect.git
Create PrefectResponse
58
0
11,416
11
1
2
def test(self):
python/ray/tune/tests/test_tune_restore.py
13
ray
{ "docstring": "Trainable crashes with fail_fast flag and the original crash message\n should bubble up.", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 13 }
2
Python
2
b1496d235fce4f19fb53553e7fb78e97e1d19054
test_tune_restore.py
146,277
4
30
test
https://github.com/ray-project/ray.git
[tune] fix error handling for fail_fast case. (#22982)
9
0
33,646
6
8
17
def check_related_objects(self, field, value, opts):
    if field.is_relation:
        # Check that the field and the queryset use the same model in a
        # query like .filter(author=Author.objects.all()). For example, the
        # opts would be Author's (from the author field) and value.model
        # would be Author.objects.all() queryset's .model (Author also).
        # The field is the related field on the lhs side.
        if (
            isinstance(value, Query)
            and not value.has_select_fields
            and not check_rel_lookup_compatibility(value.model, opts, field)
        ):
            raise ValueError(
                'Cannot use QuerySet for "%s": Use a QuerySet for "%s".'
                % (value.model._meta.object_name, opts.object_name)
            )
        elif hasattr(value, "_meta"):
            self.check_query_object_type(value, opts, field)
        elif hasattr(value, "__iter__"):
            for v in value:
                self.check_query_object_type(v, opts, field)
django/db/models/sql/query.py
165
django
{ "docstring": "Check the type of object passed to query relations.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
103
Python
71
9c19aff7c7561e3a82978a272ecdaad40dda5c00
query.py
205,870
16
104
check_related_objects
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
378
0
51,255
16
1
6
def single_source_dijkstra_path_length(G, source, cutoff=None, weight="weight"):
    return multi_source_dijkstra_path_length(G, {source}, cutoff=cutoff, weight=weight)
networkx/algorithms/shortest_paths/weighted.py
50
networkx
{ "docstring": "Find shortest weighted path lengths in G from a source node.\n\n Compute the shortest path length between source and all other\n reachable nodes for a weighted graph.\n\n Parameters\n ----------\n G : NetworkX graph\n\n source : node label\n Starting node for path\n\n cutoff : integer or float, optional\n Length (sum of edge weights) at which the search is stopped.\n If cutoff is provided, only return paths with summed weight <= cutoff.\n\n weight : string or function\n If this is a string, then edge weights will be accessed via the\n edge attribute with this key (that is, the weight of the edge\n joining `u` to `v` will be ``G.edges[u, v][weight]``). If no\n such edge attribute exists, the weight of the edge is assumed to\n be one.\n\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly three\n positional arguments: the two endpoints of an edge and the\n dictionary of edge attributes for that edge. The function must\n return a number or None to indicate a hidden edge.\n\n Returns\n -------\n length : dict\n Dict keyed by node to shortest path length from source.\n\n Raises\n ------\n NodeNotFound\n If `source` is not in `G`.\n\n Examples\n --------\n >>> G = nx.path_graph(5)\n >>> length = nx.single_source_dijkstra_path_length(G, 0)\n >>> length[4]\n 4\n >>> for node in [0, 1, 2, 3, 4]:\n ... print(f\"{node}: {length[node]}\")\n 0: 0\n 1: 1\n 2: 2\n 3: 3\n 4: 4\n\n Notes\n -----\n Edge weight attributes must be numerical.\n Distances are calculated as sums of weighted edges traversed.\n\n The weight function can be used to hide edges by returning None.\n So ``weight = lambda u, v, d: 1 if d['color']==\"red\" else None``\n will find the shortest red path.\n\n See Also\n --------\n single_source_dijkstra, single_source_bellman_ford_path_length\n\n ", "language": "en", "n_whitespaces": 512, "n_words": 289, "vocab_size": 174 }
10
Python
10
d82815dba6c8ddce19cd49f700298dc82a58f066
weighted.py
177,506
2
33
single_source_dijkstra_path_length
https://github.com/networkx/networkx.git
Hide edges with a weight of None in A*. (#5945) * Hide edges with a weight of None in A*. This matches the Dijkstra's weight interface. * Update Dijkstra's and A* docs for weights of None. * Add tests for A* with weight of None. * Add another test for A* with a weight function. * Document that None indicates a hidden edge.
16
0
42,410
8
1
15
def test_status_error_msg_format(ray_start_stop):
    config_file_name = os.path.join(
        os.path.dirname(__file__), "test_config_files", "deployment_fail.yaml"
    )

    subprocess.check_output(["serve", "deploy", config_file_name])

    status_response = subprocess.check_output(
        ["serve", "status", "-a", "http://localhost:52365/"]
    )
    serve_status = yaml.safe_load(status_response)
    print("serve_status", serve_status)
python/ray/serve/tests/test_cli.py
123
ray
{ "docstring": "Deploys a faulty config file and checks its status.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
25
Python
22
b856daebbdc923a216ce412be477c61e6cc5707e
test_cli.py
125,445
12
79
test_status_error_msg_format
https://github.com/ray-project/ray.git
[Serve] Fix Formatting of Error Messages printed in `serve status` (#26578)
63
0
27,873
11
21
27
def _get_style_dict(self, gc, rgbFace):
    attrib = {}

    forced_alpha = gc.get_forced_alpha()

    if gc.get_hatch() is not None:
        attrib['fill'] = "url(#%s)" % self._get_hatch(gc, rgbFace)
        if (rgbFace is not None and len(rgbFace) == 4 and rgbFace[3] != 1.0
                and not forced_alpha):
            attrib['fill-opacity'] = _short_float_fmt(rgbFace[3])
    else:
        if rgbFace is None:
            attrib['fill'] = 'none'
        else:
            if tuple(rgbFace[:3]) != (0, 0, 0):
                attrib['fill'] = rgb2hex(rgbFace)
            if (len(rgbFace) == 4 and rgbFace[3] != 1.0
                    and not forced_alpha):
                attrib['fill-opacity'] = _short_float_fmt(rgbFace[3])

    if forced_alpha and gc.get_alpha() != 1.0:
        attrib['opacity'] = _short_float_fmt(gc.get_alpha())

    offset, seq = gc.get_dashes()
    if seq is not None:
        attrib['stroke-dasharray'] = ','.join(
            _short_float_fmt(val) for val in seq)
        attrib['stroke-dashoffset'] = _short_float_fmt(float(offset))

    linewidth = gc.get_linewidth()
    if linewidth:
        rgb = gc.get_rgb()
        attrib['stroke'] = rgb2hex(rgb)
        if not forced_alpha and rgb[3] != 1.0:
            attrib['stroke-opacity'] = _short_float_fmt(rgb[3])
        if linewidth != 1.0:
            attrib['stroke-width'] = _short_float_fmt(linewidth)
        if gc.get_joinstyle() != 'round':
            attrib['stroke-linejoin'] = gc.get_joinstyle()
        if gc.get_capstyle() != 'butt':
            attrib['stroke-linecap'] = _capstyle_d[gc.get_capstyle()]

    return attrib
lib/matplotlib/backends/backend_svg.py
558
matplotlib
{ "docstring": "Generate a style string from the GraphicsContext and rgbFace.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
145
Python
76
ec410abbb3a721e31f3aaa61e9e4f941467e35e1
backend_svg.py
108,151
37
342
_get_style_dict
https://github.com/matplotlib/matplotlib.git
Deprecate functions in backends
580
0
23,079
17
5
14
def _get_node_attribute_at_index(self, node_index, attr, attr_name):
    if not self._inbound_nodes:
        raise RuntimeError(
            f"The layer {self.name} has never been called "
            f"and thus has no defined {attr_name}."
        )
    if not len(self._inbound_nodes) > node_index:
        raise ValueError(
            f"Asked to get {attr_name} at node "
            f"{node_index}, but the layer has only "
            f"{len(self._inbound_nodes)} inbound nodes."
        )
    values = getattr(self._inbound_nodes[node_index], attr)
    if isinstance(values, list) and len(values) == 1:
        return values[0]
    else:
        return values
keras/engine/base_layer.py
165
keras
{ "docstring": "Private utility to retrieves an attribute (e.g. inputs) from a node.\n\n This is used to implement the methods:\n - get_input_shape_at\n - get_output_shape_at\n - get_input_at\n etc...\n\n Args:\n node_index: Integer index of the node from which\n to retrieve the attribute.\n attr: Exact node attribute name.\n attr_name: Human-readable attribute name, for error messages.\n\n Returns:\n The layer's attribute `attr` at the node of index `node_index`.\n\n Raises:\n RuntimeError: If the layer has no inbound nodes, or if called in Eager\n mode.\n ValueError: If the index provided does not match any node.\n ", "language": "en", "n_whitespaces": 257, "n_words": 86, "vocab_size": 66 }
66
Python
54
84afc5193d38057e2e2badf9c889ea87d80d8fbf
base_layer.py
270,767
17
84
_get_node_attribute_at_index
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
249
0
80,572
15
4
21
def _is_current(self, file_path, zip_path):
    timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
    if not os.path.isfile(file_path):
        return False
    stat = os.stat(file_path)
    if stat.st_size != size or stat.st_mtime != timestamp:
        return False
    # check that the contents match
    zip_contents = self.loader.get_data(zip_path)
    with open(file_path, 'rb') as f:
        file_contents = f.read()
    return zip_contents == file_contents
.venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py
152
transferlearning
{ "docstring": "\n Return True if the file_path is current for this zip_path\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
47
Python
36
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
__init__.py
63,102
11
92
_is_current
https://github.com/jindongwang/transferlearning.git
upd; format
143
0
13,147
11
4
9
def iter_mapped_dependants(self) -> Iterator["MappedOperator"]:
    return (
        downstream
        for downstream in self._iter_all_mapped_downstreams()
        if any(p.node_id == self.node_id for p in downstream.iter_mapped_dependencies())
    )
airflow/models/taskmixin.py
69
airflow
{ "docstring": "Return mapped nodes that depend on the current task the expansion.\n\n For now, this walks the entire DAG to find mapped nodes that has this\n current task as an upstream. We cannot use ``downstream_list`` since it\n only contains operators, not task groups. In the future, we should\n provide a way to record an DAG node's all downstream nodes instead.\n ", "language": "en", "n_whitespaces": 94, "n_words": 59, "vocab_size": 45 }
20
Python
17
197cff3194e855b9207c3c0da8ae093a0d5dda55
taskmixin.py
47,757
13
42
iter_mapped_dependants
https://github.com/apache/airflow.git
Ensure TaskMap only checks "relevant" dependencies (#23053) When looking for "mapped dependants" of a task, we only want a task if it not only is a direct downstream of the task, but also it actually "uses" the task's pushed XCom for task mapping. So we need to peek into the mapped downstream task's expansion kwargs, and only count it as a mapped dependant if the upstream is referenced there.
74
0
9,246
12
1
6
def device_traits() -> list[str]:
    return ["sdm.devices.traits.DoorbellChime"]

@pytest.fixture(autouse=True)
tests/components/nest/test_events.py
42
@pytest.fixture(autouse=True)
core
{ "docstring": "Fixture for the present traits of the device under test.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
7
Python
7
7a5fa8eb58f49282e73f454826472ba54cd37a30
test_events.py
313,498
3
14
device_traits
https://github.com/home-assistant/core.git
Update more nest tests to use common fixtures (#73303) Update nest tests to use fixtures
12
1
112,116
7
4
7
def __getitem__(self, key):
    if key in self._layout_map:
        return self._layout_map[key]

    for k in self._layout_map:
        if re.match(k, key):
            return self._layout_map[k]
    return None
keras/dtensor/layout_map.py
74
keras
{ "docstring": "Retrieve the corresponding layout by the string key.\n\n When there isn't an exact match, all the existing keys in the layout map\n will be treated as a regex and map against the input key again. The first\n match will be returned, based on the key insertion order. Return None if\n there isn't any match found.\n\n Args:\n key: the string key as the query for the layout.\n\n Returns:\n Corresponding layout based on the query.\n ", "language": "en", "n_whitespaces": 140, "n_words": 73, "vocab_size": 50 }
20
Python
14
84afc5193d38057e2e2badf9c889ea87d80d8fbf
layout_map.py
270,587
7
48
__getitem__
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
85
0
80,488
11
4
22
def load_diabetes(*, return_X_y=False, as_frame=False, scaled=True):
    data_filename = "diabetes_data_raw.csv.gz"
    target_filename = "diabetes_target.csv.gz"
    data = load_gzip_compressed_csv_data(data_filename)
    target = load_gzip_compressed_csv_data(target_filename)

    if scaled:
        data = scale(data, copy=False)
        data /= data.shape[0] ** 0.5

    fdescr = load_descr("diabetes.rst")

    feature_names = ["age", "sex", "bmi", "bp", "s1", "s2", "s3", "s4", "s5", "s6"]

    frame = None
    target_columns = [
        "target",
    ]
    if as_frame:
        frame, data, target = _convert_data_dataframe(
            "load_diabetes", data, target, feature_names, target_columns
        )

    if return_X_y:
        return data, target

    return Bunch(
        data=data,
        target=target,
        frame=frame,
        DESCR=fdescr,
        feature_names=feature_names,
        data_filename=data_filename,
        target_filename=target_filename,
        data_module=DATA_MODULE,
    )
sklearn/datasets/_base.py
260
scikit-learn
{ "docstring": "Load and return the diabetes dataset (regression).\n\n ============== ==================\n Samples total 442\n Dimensionality 10\n Features real, -.2 < x < .2\n Targets integer 25 - 346\n ============== ==================\n\n .. note::\n The meaning of each feature (i.e. `feature_names`) might be unclear\n (especially for `ltg`) as the documentation of the original dataset is\n not explicit. We provide information that seems correct in regard with\n the scientific literature in this field of research.\n\n Read more in the :ref:`User Guide <diabetes_dataset>`.\n\n Parameters\n ----------\n return_X_y : bool, default=False\n If True, returns ``(data, target)`` instead of a Bunch object.\n See below for more information about the `data` and `target` object.\n\n .. versionadded:: 0.18\n\n as_frame : bool, default=False\n If True, the data is a pandas DataFrame including columns with\n appropriate dtypes (numeric). The target is\n a pandas DataFrame or Series depending on the number of target columns.\n If `return_X_y` is True, then (`data`, `target`) will be pandas\n DataFrames or Series as described below.\n\n .. versionadded:: 0.23\n\n scaled : bool, default=True\n If True, the feature variables are mean centered and scaled by the\n standard deviation times the square root of `n_samples`.\n If False, raw data is returned for the feature variables.\n\n .. versionadded:: 1.1\n\n Returns\n -------\n data : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n data : {ndarray, dataframe} of shape (442, 10)\n The data matrix. If `as_frame=True`, `data` will be a pandas\n DataFrame.\n target: {ndarray, Series} of shape (442,)\n The regression target. If `as_frame=True`, `target` will be\n a pandas Series.\n feature_names: list\n The names of the dataset columns.\n frame: DataFrame of shape (442, 11)\n Only present when `as_frame=True`. DataFrame with `data` and\n `target`.\n\n .. versionadded:: 0.23\n DESCR: str\n The full description of the dataset.\n data_filename: str\n The path to the location of the data.\n target_filename: str\n The path to the location of the target.\n\n (data, target) : tuple if ``return_X_y`` is True\n Returns a tuple of two ndarray of shape (n_samples, n_features)\n A 2D array with each row representing one sample and each column\n representing the features and/or target of a given sample.\n\n .. versionadded:: 0.18\n ", "language": "en", "n_whitespaces": 739, "n_words": 339, "vocab_size": 194 }
80
Python
60
a793c1f0ad7dd63b2a896d2e84087089a11e7fca
_base.py
258,643
30
164
load_diabetes
https://github.com/scikit-learn/scikit-learn.git
DOC Ensures that sklearn.datasets._base.load_breast_cancer passes numpydoc validation (#22346) Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: Arturo Amor <[email protected]>
234
0
75,347
11
1
5
def require_pyctcdecode(test_case):
    return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
src/transformers/testing_utils.py
37
transformers
{ "docstring": "\n Decorator marking a test that requires pyctcdecode\n ", "language": "en", "n_whitespaces": 14, "n_words": 7, "vocab_size": 7 }
7
Python
7
57e6464ac9a31156f1c93e59107323e6ec01309e
testing_utils.py
37,487
2
20
require_pyctcdecode
https://github.com/huggingface/transformers.git
Update all require decorators to use skipUnless when possible (#16999)
13
0
6,794
10
1
8
def test_get_entities_changed(self) -> None:
    cache = StreamChangeCache("#test", 1)

    cache.entity_has_changed("[email protected]", 2)
    cache.entity_has_changed("[email protected]", 3)
    cache.entity_has_changed("[email protected]", 4)

    # Query all the entries, but mid-way through the stream. We should only
    # get the ones after that point.
    self.assertEqual(
        cache.get_entities_changed(
            ["[email protected]", "[email protected]", "[email protected]"], stream_pos=2
        ),
        {"[email protected]", "[email protected]"},
    )

    # Query all the entries mid-way through the stream, but include one
    # that doesn't exist in it. We shouldn't get back the one that doesn't
    # exist.
    self.assertEqual(
        cache.get_entities_changed(
            [
                "[email protected]",
                "[email protected]",
                "[email protected]",
                "[email protected]",
            ],
            stream_pos=2,
        ),
        {"[email protected]", "[email protected]"},
    )

    # Query all the entries, but before the first known point. We will get
    # all the entries we queried for, including ones that don't exist.
    self.assertEqual(
        cache.get_entities_changed(
            [
                "[email protected]",
                "[email protected]",
                "[email protected]",
                "[email protected]",
            ],
            stream_pos=0,
        ),
        {"[email protected]", "[email protected]", "[email protected]", "[email protected]"},
    )

    # Query a subset of the entries mid-way through the stream. We should
    # only get back the subset.
    self.assertEqual(
        cache.get_entities_changed(["[email protected]"], stream_pos=2),
        {"[email protected]"},
    )
tests/util/test_stream_change_cache.py
281
synapse
{ "docstring": "\n StreamChangeCache.get_entities_changed will return the entities in the\n given list that have changed since the provided stream ID. If the\n stream position is earlier than the earliest known position, it will\n return all of the entities queried for.\n ", "language": "en", "n_whitespaces": 74, "n_words": 37, "vocab_size": 28 }
150
Python
77
acea4d7a2ff61b5beda420b54a8451088060a8cd
test_stream_change_cache.py
250,017
45
158
test_get_entities_changed
https://github.com/matrix-org/synapse.git
Add missing types to tests.util. (#14597) Removes files under tests.util from the ignored by list, then fully types all tests/util/*.py files.
682
0
73,231
11
3
7
def is_hash_allowed(self, hashes):
    # type: (Optional[Hashes]) -> bool
    if hashes is None or not self.has_hash:
        return False
    # Assert non-None so mypy knows self.hash_name and self.hash are str.
    assert self.hash_name is not None
    assert self.hash is not None
    return hashes.is_hash_allowed(self.hash_name, hex_digest=self.hash)

# TODO: Relax this comparison logic to ignore, for example, fragments.
.venv/lib/python3.8/site-packages/pip/_internal/models/link.py
79
transferlearning
{ "docstring": "\n Return True if the link has a hash and it is allowed.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
52
Python
40
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
link.py
60,852
6
49
is_hash_allowed
https://github.com/jindongwang/transferlearning.git
upd; format
111
0
12,300
9
1
3
def get_paths(self, path):  # type: (str) -> t.List[str]
    return []
test/lib/ansible_test/_internal/provider/source/unsupported.py
21
ansible
{ "docstring": "Return the list of available content paths under the given path.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
10
Python
10
de5f60e374524de13fe079b52282cd7a9eeabd5f
unsupported.py
266,497
2
11
get_paths
https://github.com/ansible/ansible.git
ansible-test - Improve help for unsupported cwd. (#76866) * ansible-test - Improve help for unsupported cwd. * The `--help` option is now available when an unsupported cwd is in use. * The `--help` output now shows the same instructions about cwd as would be shown in error messages if the cwd is unsupported. * Add `--version` support to show the ansible-core version. * The explanation about cwd usage has been improved to explain more clearly what is required. Resolves https://github.com/ansible/ansible/issues/64523 Resolves https://github.com/ansible/ansible/issues/67551
25
0
78,438
6
2
9
def _normalized_keys(self, section, items):
    # type: (str, Iterable[Tuple[str, Any]]) -> Dict[str, Any]
    normalized = {}
    for name, val in items:
        key = section + "." + _normalize_name(name)
        normalized[key] = val
    return normalized
.venv/lib/python3.8/site-packages/pip/_internal/configuration.py
65
transferlearning
{ "docstring": "Normalizes items to construct a dictionary with normalized keys.\n\n This routine is where the names become keys and are made the same\n regardless of source - configuration files or environment.\n ", "language": "en", "n_whitespaces": 51, "n_words": 30, "vocab_size": 29 }
32
Python
27
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
configuration.py
60,675
6
39
_normalized_keys
https://github.com/jindongwang/transferlearning.git
upd; format
89
0
12,240
11
1
38
def check_figures_equal(*, extensions=("png", "pdf", "svg"), tol=0):
lib/matplotlib/testing/decorators.py
130
""" Decorator for test cases that generate and compare two figures. The decorated function must take two keyword arguments, *fig_test* and *fig_ref*, and draw the test and reference images on them. After the function returns, the figures are saved and compared. This decorator should be preferred over `image_comparison` when possible in order to keep the size of the test suite from ballooning. Parameters ---------- extensions : list, default: ["png", "pdf", "svg"]The extensions toThe RMS threshold above which thefailedIf any new figures are created (and not subsequently closed) inside the test function. Examples -------- Check that calling `.Axes.plot` with a single argument plots it against ``[0, 1, 2, ...]``::and not subsequently closed) inside the testCheck that calling `.Axes.plot` with a single argument plots it against ``
matplotlib
{ "docstring": "\n Decorator for test cases that generate and compare two figures.\n\n The decorated function must take two keyword arguments, *fig_test*\n and *fig_ref*, and draw the test and reference images on them.\n After the function returns, the figures are saved and compared.\n\n This decorator should be preferred over `image_comparison` when possible in\n order to keep the size of the test suite from ballooning.\n\n Parameters\n ----------\n extensions : list, default: [\"png\", \"pdf\", \"svg\"]\n The extensions to test.\n tol : float\n The RMS threshold above which the test is considered failed.\n\n Raises\n ------\n RuntimeError\n If any new figures are created (and not subsequently closed) inside\n the test function.\n\n Examples\n --------\n Check that calling `.Axes.plot` with a single argument plots it against\n ``[0, 1, 2, ...]``::\n", "language": "en", "n_whitespaces": 200, "n_words": 121, "vocab_size": 97 }
6
Python
6
ca78e3d0eba4d948835c5499e0ff4084b998f28e
decorators.py
110,184
39
45
check_figures_equal
https://github.com/matplotlib/matplotlib.git
[DOC] swapped params in fig_compare_error msg
9
8
23,964
9
2
11
def lowest_common_ancestor(G, node1, node2, default=None):
    ans = list(all_pairs_lowest_common_ancestor(G, pairs=[(node1, node2)]))
    if ans:
        assert len(ans) == 1
        return ans[0][1]
    else:
        return default

@not_implemented_for("undirected")
@not_implemented_for("multigraph")
networkx/algorithms/lowest_common_ancestors.py
105
@not_implemented_for("undirected") @not_implemented_for("multigraph")
networkx
{ "docstring": "Compute the lowest common ancestor of the given pair of nodes.\n\n Parameters\n ----------\n G : NetworkX directed graph\n\n node1, node2 : nodes in the graph.\n\n default : object\n Returned if no common ancestor between `node1` and `node2`\n\n Returns\n -------\n The lowest common ancestor of node1 and node2,\n or default if they have no common ancestors.\n\n Examples\n --------\n >>> G = nx.DiGraph([(0, 1), (0, 2), (2, 3), (2, 4), (1, 6), (4, 5)])\n >>> nx.lowest_common_ancestor(G, 3, 5)\n 2\n\n We can also set `default` argument as below. The value of default is returned\n if there are no common ancestors of given two nodes.\n\n >>> G = nx.DiGraph([(4, 5), (12, 13)])\n >>> nx.lowest_common_ancestor(G, 12, 5, default=\"No common ancestors!\")\n 'No common ancestors!'\n\n Notes\n -----\n Only defined on non-null directed acyclic graphs.\n Takes n log(n) time in the size of the graph.\n See `all_pairs_lowest_common_ancestor` when you have\n more than one pair of nodes of interest.\n\n See Also\n --------\n tree_all_pairs_lowest_common_ancestor\n all_pairs_lowest_common_ancestor\n ", "language": "en", "n_whitespaces": 252, "n_words": 155, "vocab_size": 107 }
23
Python
22
abaa68779ccb4cce8d1a5ecade622ab96d01edeb
lowest_common_ancestors.py
176,975
7
55
lowest_common_ancestor
https://github.com/networkx/networkx.git
Add examples to lowest common ancestors algorithms (#5531) * Add examples to lowest common ancestors documentation * Fix output style of examples * Fix output style of example * Update pre-commit * Update networkx/algorithms/lowest_common_ancestors.py Co-authored-by: Ross Barnowski <[email protected]> * Update networkx/algorithms/lowest_common_ancestors.py Co-authored-by: Ross Barnowski <[email protected]> * Indentation fix & pprint dictionary * Update networkx/algorithms/lowest_common_ancestors.py Co-authored-by: Ross Barnowski <[email protected]> * Update networkx/algorithms/lowest_common_ancestors.py Co-authored-by: Ross Barnowski <[email protected]> * Update networkx/algorithms/lowest_common_ancestors.py Co-authored-by: Ross Barnowski <[email protected]> * Move "import pprint" to the example Co-authored-by: dtuncturk <[email protected]> Co-authored-by: Ross Barnowski <[email protected]>
54
1
42,203
13
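A small sanity check (assuming networkx is installed) that the single-pair helper above agrees with the all-pairs generator it wraps; the graph and expected answer come from the record's own docstring.

import networkx as nx

G = nx.DiGraph([(0, 1), (0, 2), (2, 3), (2, 4), (1, 6), (4, 5)])
# The helper asks all_pairs_lowest_common_ancestor for exactly one pair
# and unpacks the lone ((node1, node2), lca) result.
((u, v), lca), = nx.all_pairs_lowest_common_ancestor(G, pairs=[(3, 5)])
assert lca == nx.lowest_common_ancestor(G, 3, 5) == 2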
7
23
def _get_per_session_stats(self): if self._per_session_stats is None: logger.debug("Collating per session stats") compiled = [] for session_id, ts_data in self._time_stats.items(): logger.debug("Compiling session ID: %s", session_id) if self._state is None: logger.debug("Session state dict doesn't exist. Most likely task has been " "terminated during compilation") return compiled.append(self._collate_stats(session_id, ts_data)) self._per_session_stats = list(sorted(compiled, key=lambda k: k["session"])) elif self._session.is_training: logger.debug("Collating per session stats for latest training data") session_id = self._session.session_ids[-1] ts_data = self._time_stats[session_id] if session_id > len(self._per_session_stats): self._per_session_stats.append(self._collate_stats(session_id, ts_data)) stats = self._per_session_stats[-1] stats["start"] = ts_data["start_time"] stats["end"] = ts_data["end_time"] stats["elapsed"] = int(stats["end"] - stats["start"]) stats["iterations"] = ts_data["iterations"] stats["rate"] = (((stats["batch"] * 2) * stats["iterations"]) / stats["elapsed"] if stats["elapsed"] != 0 else 0) logger.debug("per_session_stats: %s", self._per_session_stats)
lib/gui/analysis/stats.py
425
faceswap
{ "docstring": " Populate the attribute :attr:`_per_session_stats` with a sorted list by session ID\n of each ID in the training/loaded session. Stats contain the session ID, start, end and\n elapsed times, the training rate, batch size and number of iterations for each session.\n\n If a training session is running, then updates the training sessions stats only.\n ", "language": "en", "n_whitespaces": 82, "n_words": 53, "vocab_size": 39 }
107
Python
79
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
stats.py
100,313
26
249
_get_per_session_stats
https://github.com/deepfakes/faceswap.git
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
447
0
19,810
18
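The only non-obvious arithmetic above is the "rate" field; a toy calculation with made-up numbers makes it concrete (the *2 presumably accounts for the two faces trained per iteration, which is an assumption here).

batch = 16          # hypothetical batch size
iterations = 1000   # hypothetical iteration count for the session
elapsed = 250       # seconds between start_time and end_time
rate = ((batch * 2) * iterations) / elapsed if elapsed != 0 else 0
print(rate)  # 128.0 examples per second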
1
4
def load_info():     text = """In order to load a CSV do the following: 1. Add headers to the first row with a column each for: asset identifier (such as a stock ticker), type of asset (stock, bond, option, crypto), volume transacted, buy date in yyyy/mm/dd, price paid, any fees paid, any option premium paid or received, and whether the asset was bought (covered) or sold (shorted). 2. Place this file in gamestonk_terminal/portfolio/portfolios"""     console.print(text)
gamestonk_terminal/portfolio/portfolio_view.py
40
OpenBBTerminal
{ "docstring": "Prints instructions to load a CSV\n\n Returns\n ----------\n text : str\n Information on how to load a csv\n \nIn order to load a CSV do the following:\n\n1. Add headers to the first row, below is data for each column:\\n\n\\t1. Identifier for the asset (such as a stock ticker)\n\\t2. Type of asset (stock, bond, option, crypto)\n\\t3. The volume of the asset transacted\n\\t4. The buy date in yyyy/mm/dd\n\\t5. The Price paid for the asset\n\\t6. Any fees paid during the transaction\n\\t7. A premium paid or received if this was an option\n\\t8. Whether the asset was bought (covered) or sold (shorted)\\n\n2. Place this file in gamestonk_terminal/portfolio/portfolios\\n\n ", "language": "en", "n_whitespaces": 128, "n_words": 112, "vocab_size": 82 }
5
Python
5
82747072c511beb1b2672846ae2ee4aec53eb562
portfolio_view.py
281,521
16
14
load_info
https://github.com/OpenBB-finance/OpenBBTerminal.git
Terminal Wide Rich (#1161) * My idea for how we handle Rich moving forward * remove independent consoles * FIxed pylint issues * add a few vars * Switched print to console * More transitions * Changed more prints * Replaced all prints * Fixing tabulate * Finished replace tabulate * Finished removing rich from Tabulate * add Panel around menu * add GST watermark under feature flag * Fixed 46 tests * Delete test_screener[False].yaml * Delete test_screener[True].yaml * Fixed the rest of the tests * add help and source color vars and use rgb * rich on stocks/options * update rich on disc, dps, sia * rich in gov, ins and scr menus * ba and ca menus with rich * Fixed import issue * Fixed some tests * removed termcolor * Removed prettytable * add rich to remaining stocks menus * FIxed linting issue * Added James' changes * Updated dependencies * Add rich to cryptocurrency menu * refactor economy and forex * refactor etf with rich * refactor mfunds * refactor rich rest * not specify style so default color works well on any background * Fixing mypy issues * Updated tests * More test fixes * James' test fixes * Updating tests : stocks/screener - fix cassettes using BR * Updating tests : crypto * Updating tests : disable DEBUG_MODE * Updating tests : stocks/fa/yfinance * minor fixes that escape * Improve the rich table function (that replaces tabulate :D ) * Fixed bad code * delete rogue file + dcf fix + NoConsole * sia mypy * fuck you linter * fuck you linter pt 2 * skip hehe * i hate the black linter * ubuntu mypy attempt * Update : rich_config + gtff * Updating tests : conftest * Updating tests : stocks * Update : rich_config * Updating : rich_config * make panel configurable for Theodore :b * colors update * Merged * Updating : rich_config + feature_flags * Updating : rich_config * Updating tests : stocks * Updating : feature_flags Co-authored-by: DidierRLopes <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]> Co-authored-by: james <[email protected]> Co-authored-by: jose-donato <[email protected]>
15
0
83,824
7
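A hypothetical CSV following the eight-column layout the docstring above describes; the header names are invented for illustration, only the column order and meaning come from the text.

import csv, io

raw = (
    "Ticker,Type,Quantity,BuyDate,Price,Fees,Premium,Side\n"
    "AAPL,stock,10,2021/01/04,129.41,0.0,0.0,covered\n"
)
rows = list(csv.DictReader(io.StringIO(raw)))
print(rows[0]["Ticker"], rows[0]["Price"])  # AAPL 129.41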
1
6
def MultivariateNormal(name, mu, sigma):     return multivariate_rv(MultivariateNormalDistribution, name, mu, sigma)  #------------------------------------------------------------------------------- # Multivariate Laplace distribution --------------------------------------------
sympy/stats/joint_rv_types.py
33
sympy
{ "docstring": "\n Creates a continuous random variable with Multivariate Normal\n Distribution.\n\n The density of the multivariate normal distribution can be found at [1].\n\n Parameters\n ==========\n\n mu : List representing the mean or the mean vector\n sigma : Positive semidefinite square matrix\n Represents covariance Matrix\n If `\\sigma` is noninvertible then only sampling is supported currently\n\n Returns\n =======\n\n RandomSymbol\n\n Examples\n ========\n\n >>> from sympy.stats import MultivariateNormal, density, marginal_distribution\n >>> from sympy import symbols, MatrixSymbol\n >>> X = MultivariateNormal('X', [3, 4], [[2, 1], [1, 2]])\n >>> y, z = symbols('y z')\n >>> density(X)(y, z)\n sqrt(3)*exp(-y**2/3 + y*z/3 + 2*y/3 - z**2/3 + 5*z/3 - 13/3)/(6*pi)\n >>> density(X)(1, 2)\n sqrt(3)*exp(-4/3)/(6*pi)\n >>> marginal_distribution(X, X[1])(y)\n exp(-(y - 4)**2/4)/(2*sqrt(pi))\n >>> marginal_distribution(X, X[0])(y)\n exp(-(y - 3)**2/4)/(2*sqrt(pi))\n\n The example below shows that it is also possible to use\n symbolic parameters to define the MultivariateNormal class.\n\n >>> n = symbols('n', integer=True, positive=True)\n >>> Sg = MatrixSymbol('Sg', n, n)\n >>> mu = MatrixSymbol('mu', n, 1)\n >>> obs = MatrixSymbol('obs', n, 1)\n >>> X = MultivariateNormal('X', mu, Sg)\n\n The density of a multivariate normal can be\n calculated using a matrix argument, as shown below.\n\n >>> density(X)(obs)\n (exp(((1/2)*mu.T - (1/2)*obs.T)*Sg**(-1)*(-mu + obs))/sqrt((2*pi)**n*Determinant(Sg)))[0, 0]\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Multivariate_normal_distribution\n\n ", "language": "en", "n_whitespaces": 325, "n_words": 193, "vocab_size": 137 }
16
Python
15
9ad8ab9fe58051cf11626ba6654852fcfec60147
joint_rv_types.py
196,702
58
22
MultivariateNormal
https://github.com/sympy/sympy.git
Documentation cleanup 5
19
0
48,120
7
2
7
def cursor_text_end(self) -> bool: text_length = len(self.content) if self.cursor_index == text_length: return False self.cursor_index = text_length return True
src/textual/_text_backend.py
54
textual
{ "docstring": "Move the cursor to the end of the text\n\n Returns:\n bool: True if the cursor moved. False otherwise.\n ", "language": "en", "n_whitespaces": 43, "n_words": 18, "vocab_size": 14 }
18
Python
14
dd18ecbdbe744812509630935a877424202f2a70
_text_backend.py
183,434
11
32
cursor_text_end
https://github.com/Textualize/textual.git
Docstring improvements
64
0
44,186
9
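A minimal stand-in class (not the real textual backend) that reproduces the contract of the method above, to make the boolean return value concrete.

class TextCursor:
    def __init__(self, content: str, cursor_index: int = 0) -> None:
        self.content = content
        self.cursor_index = cursor_index

    def cursor_text_end(self) -> bool:
        text_length = len(self.content)
        if self.cursor_index == text_length:
            return False
        self.cursor_index = text_length
        return True

tc = TextCursor("hello", cursor_index=2)
assert tc.cursor_text_end() is True   # cursor moved from 2 to 5
assert tc.cursor_text_end() is False  # already at the end, nothing to do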
2
7
def has_perms(self, perm_list, obj=None): # noqa: D205, D212, D400, D415 return all(self.has_perm(perm, obj) for perm in perm_list)
saleor/permission/models.py
44
saleor
{ "docstring": "\n Return True if the user has each of the specified permissions. If\n object is passed, check if the user has all required perms for it.\n ", "language": "en", "n_whitespaces": 47, "n_words": 25, "vocab_size": 20 }
17
Python
17
d5ef58653803075849a6a13177e7a6e604aa2f60
models.py
30,034
2
28
has_perms
https://github.com/saleor/saleor.git
Move PermissionsMixin from django auth
32
0
5,285
9
7
25
def get(self): response = { 'learn': False, 'predict': False, 'analyse': False } if os.name != 'posix': return response for process_type in response: processes_dir = Path(tempfile.gettempdir()).joinpath(f'mindsdb/processes/{process_type}/') if not processes_dir.is_dir(): continue process_marks = [x.name for x in processes_dir.iterdir()] for p_mark in process_marks: pid = int(p_mark.split('-')[0]) try: psutil.Process(pid) except Exception: processes_dir.joinpath(p_mark).unlink() else: response[process_type] = True return response @ns_conf.route('/telemetry')
mindsdb/api/http/namespaces/util.py
230
@ns_conf.route('/telemetry')
mindsdb
{ "docstring": " Checks server use native for learn or analyse.\n Will return right result only on Linux.\n ", "language": "en", "n_whitespaces": 34, "n_words": 15, "vocab_size": 15 }
55
Python
42
44d7ef0e08e5144870ad2831ce6e221f9044c47c
util.py
114,397
22
125
get
https://github.com/mindsdb/mindsdb.git
'files' route
316
1
25,182
16
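The liveness check in the route above hinges on marker files named like "<pid>-<suffix>"; a sketch of that parsing step in isolation (the suffix format and the narrower exception type are assumptions of this sketch, the original catches Exception broadly and also unlinks stale markers).

import psutil

def is_alive(marker_name: str) -> bool:
    pid = int(marker_name.split("-")[0])
    try:
        psutil.Process(pid)   # raises if the process no longer exists
        return True
    except psutil.NoSuchProcess:
        return False

print(is_alive("1-learn"))  # pid 1 is normally present on POSIX systems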
1
6
def add_suffix(self, suffix): # noqa: PR01, RT01, D200 return self.__constructor__( query_compiler=self._query_compiler.add_suffix(suffix) )
modin/pandas/dataframe.py
41
modin
{ "docstring": "\n Suffix labels with string `suffix`.\n ", "language": "en", "n_whitespaces": 20, "n_words": 5, "vocab_size": 5 }
12
Python
12
b541b6c18e6fb4515e998b9b4f88528490cf69c6
dataframe.py
155,480
4
24
add_suffix
https://github.com/modin-project/modin.git
REFACTOR-#3948: Use `__constructor__` in `DataFrame` and `Series` classes (#5485) Signed-off-by: Anatoly Myachev <[email protected]>
45
0
36,395
11
5
28
def _broadcasting_select_mhlo(which, x, y): which_type, x_type, y_type = ( ir.RankedTensorType(v.type) for v in (which, x, y)) out_shape = list(lax_internal.broadcast_shapes( tuple(which_type.shape), tuple(x_type.shape), tuple(y_type.shape))) bcast_dims = lambda shape: mlir.dense_int_elements( range(len(out_shape) - len(shape), len(out_shape))) if which_type.shape != out_shape: which = mhlo.BroadcastInDimOp( ir.RankedTensorType.get(out_shape, which_type.element_type), which, bcast_dims(which_type.shape)) if x_type.shape != out_shape: x = mhlo.BroadcastInDimOp( ir.RankedTensorType.get(out_shape, x_type.element_type), x, bcast_dims(x_type.shape)) if y_type.shape != out_shape: y = mhlo.BroadcastInDimOp( ir.RankedTensorType.get(out_shape, y_type.element_type), y, bcast_dims(y_type.shape)) return mhlo.SelectOp(which, x, y).result
jax/_src/lax/linalg.py
316
jax
{ "docstring": "Wrapper around XLA `Select` that broadcasts its arguments.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
68
Python
50
bc658e74567ffa941b31f4e89463dc713d2ecbf4
linalg.py
120,043
20
211
_broadcasting_select_mhlo
https://github.com/google/jax.git
[MHLO] Add direct MHLO lowerings for most linear algebra kernels. PiperOrigin-RevId: 439927594
140
0
26,747
14
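The helper above exists because the MHLO select op wants all three operands to share one shape; at the array level the same idea is plain shape broadcasting, sketched here with NumPy (1.20+ for broadcast_shapes) rather than the compiler IR.

import numpy as np

which = np.array([[True], [False], [True]])  # shape (3, 1)
x = np.zeros((3, 4))
y = np.ones((1, 4))

out_shape = np.broadcast_shapes(which.shape, x.shape, y.shape)
print(out_shape)                    # (3, 4)
print(np.where(which, x, y).shape)  # np.where broadcasts the same way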
1
4
def preprocess_input(x, data_format=None): return x @keras_export("keras.applications.regnet.decode_predictions")
keras/applications/regnet.py
32
@keras_export("keras.applications.regnet.decode_predictions")
keras
{ "docstring": "A placeholder method for backward compatibility.\n\n The preprocessing logic has been included in the regnet model\n implementation. Users are no longer required to call this method to\n normalize the input data. This method does nothing and only kept as a\n placeholder to align the API surface between old and new version of model.\n\n Args:\n x: A floating point `numpy.array` or a `tf.Tensor`.\n data_format: Optional data format of the image tensor/array. Defaults to\n None, in which case the global setting\n `tf.keras.backend.image_data_format()` is used (unless you changed it,\n it defaults to \"channels_last\").{mode}\n\n Returns:\n Unchanged `numpy.array` or `tf.Tensor`.\n ", "language": "en", "n_whitespaces": 152, "n_words": 95, "vocab_size": 76 }
6
Python
6
3613c3defc39c236fb1592c4f7ba1a9cc887343a
regnet.py
278,618
2
12
preprocess_input
https://github.com/keras-team/keras.git
Remove pylint comments. PiperOrigin-RevId: 452353044
11
1
82,631
7
4
10
def setDebugActions(self, startAction, successAction, exceptionAction): self.debugActions = (startAction or _defaultStartDebugAction, successAction or _defaultSuccessDebugAction, exceptionAction or _defaultExceptionDebugAction) self.debug = True return self
.venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py
54
transferlearning
{ "docstring": "\n Enable display of debugging messages while doing pattern matching.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
21
Python
18
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
pyparsing.py
63,461
6
36
setDebugActions
https://github.com/jindongwang/transferlearning.git
upd; format
105
0
13,314
8
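A hedged sketch of plugging custom hooks into the method above; the callbacks take *args/**kwargs because the three hooks receive different argument lists (roughly: the start hook gets the input and expression, the success hook additionally gets the matched tokens, the exception hook gets the raised error), and the exact signatures vary between pyparsing versions.

import pyparsing as pp

def on_start(*args, **kwargs):
    print("trying", args[-1])

def on_success(*args, **kwargs):
    print("matched, tokens:", args[-1])

def on_failure(*args, **kwargs):
    print("failed with", args[-1])

integer = pp.Word(pp.nums)
integer.setDebugActions(on_start, on_success, on_failure)
integer.parseString("42")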
1
4
def preferred_batch_format(cls) -> BatchFormat: return BatchFormat.PANDAS
python/ray/train/predictor.py
22
ray
{ "docstring": "Batch format hint for upstream producers to try yielding best block format.\n\n The preferred batch format to use if both `_predict_pandas` and\n `_predict_numpy` are implemented. Defaults to Pandas.\n\n Can be overriden by predictor classes depending on the framework type,\n e.g. TorchPredictor prefers Numpy and XGBoostPredictor prefers Pandas as\n native batch format.\n\n ", "language": "en", "n_whitespaces": 93, "n_words": 51, "vocab_size": 44 }
6
Python
6
326d84f1149319809191e7887155df7f04f6f46a
predictor.py
136,395
12
12
preferred_batch_format
https://github.com/ray-project/ray.git
[AIR][Predictor] Enable numpy based predictor (#28917) Co-authored-by: Clark Zinzow <[email protected]> Co-authored-by: Amog Kamsetty <[email protected]>
20
0
30,906
6
1
20
def test_song_from_data_dump(): # Loads from str song = Song.from_data_dump( ) assert song.name == "Ropes" assert song.artists == ["Dirty Palm", "Chandler Jewels"] assert song.album_name == "Ropes" assert song.album_artist == "Dirty Palm" assert song.genres == ["gaming edm", "melbourne bounce international"] assert song.disc_number == 1 assert song.duration == 188 assert song.year == 2021 assert song.date == "2021-10-28" assert song.track_number == 1 assert song.tracks_count == 1 assert song.isrc == "GB2LD2110301" assert song.song_id == "1t2qKa8K72IBC8yQlhD9bU" assert ( song.cover_url == "https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332" ) assert song.explicit == False assert song.download_url == None
tests/types/test_song.py
207
spotify-downloader
{ "docstring": "\n Tests if Song.from_data_dump() works correctly.\n \n {\n \"name\": \"Ropes\",\n \"artists\": [\"Dirty Palm\", \"Chandler Jewels\"],\n \"album_name\": \"Ropes\",\n \"album_artist\": \"Dirty Palm\",\n \"genres\": [\"gaming edm\", \"melbourne bounce international\"],\n \"disc_number\": 1,\n \"duration\": 188,\n \"year\": 2021,\n \"date\": \"2021-10-28\",\n \"track_number\": 1,\n \"tracks_count\": 1,\n \"isrc\": \"GB2LD2110301\",\n \"song_id\": \"1t2qKa8K72IBC8yQlhD9bU\",\n \"cover_url\": \"https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332\",\n \"explicit\": false,\n \"download_url\": null,\n \"artist\" : \"Dirty Palm\",\n \"disc_count\": 1,\n \"copyright\": \"\",\n \"publisher\": \"\",\n \"url\": \"https://open.spotify.com/track/1t2qKa8K72IBC8yQlhD9bU\"\n }\n ", "language": "en", "n_whitespaces": 319, "n_words": 59, "vocab_size": 51 }
84
Python
50
fa2ad657482aca9dc628e6d7062b8badf2706bb6
test_song.py
30,144
47
119
test_song_from_data_dump
https://github.com/spotDL/spotify-downloader.git
v4 init
169
0
5,345
9
8
21
def _dedupe_indices_in_rule(self, rule): index_rules = {k:v for k,v in rule.items() if isinstance(k, TensorIndex)} other_rules = {k:v for k,v in rule.items() if k not in index_rules.keys()} exclude = set(self.get_indices()) newrule = {} newrule.update(index_rules) exclude.update(index_rules.keys()) exclude.update(index_rules.values()) for old, new in other_rules.items(): new_renamed = self._dedupe_indices(new, exclude) if old == new or new_renamed is None: newrule[old] = new else: newrule[old] = new_renamed exclude.update(get_indices(new_renamed)) return newrule
sympy/tensor/tensor.py
245
sympy
{ "docstring": "\n rule: dict\n\n This applies self._dedupe_indices on all values of rule.\n ", "language": "en", "n_whitespaces": 32, "n_words": 10, "vocab_size": 10 }
61
Python
39
1eee7b6ba5b4903ac889a73feab130572d232554
tensor.py
200,567
16
153
_dedupe_indices_in_rule
https://github.com/sympy/sympy.git
Add TensMul._dedupe_indices_in_rule This applies self._dedupe_indices on all values of `rule`.
209
0
49,702
14
1
28
def test_delete_media_never_accessed(self) -> None: # upload and do not access server_and_media_id = self._create_media() self.pump(1.0) # test that the file exists media_id = server_and_media_id.split("/")[1] local_path = self.filepaths.local_media_filepath(media_id) self.assertTrue(os.path.exists(local_path)) # timestamp after upload/create now_ms = self.clock.time_msec() channel = self.make_request( "POST", self.url + "?before_ts=" + str(now_ms), access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(1, channel.json_body["total"]) self.assertEqual( media_id, channel.json_body["deleted_media"][0], ) self._access_media(server_and_media_id, False)
tests/rest/admin/test_media.py
236
synapse
{ "docstring": "\n Tests that media deleted if it is older than `before_ts` and never accessed\n `last_access_ts` is `NULL` and `created_ts` < `before_ts`\n ", "language": "en", "n_whitespaces": 42, "n_words": 20, "vocab_size": 17 }
56
Python
48
c97042f7eef3748e17c90e48a4122389a89c4735
test_media.py
249,114
23
146
test_delete_media_never_accessed
https://github.com/matrix-org/synapse.git
Use literals in place of `HTTPStatus` constants in tests (#13469)
230
0
72,621
11
7
11
def parseline(self, line): line = line.strip() if not line: return None, None, line elif line[0] == '?': line = 'help ' + line[1:] elif line[0] == '!': if hasattr(self, 'do_shell'): line = 'shell ' + line[1:] else: return None, None, line i, n = 0, len(line) while i < n and line[i] in self.identchars: i = i+1 cmd, arg = line[:i], line[i:].strip() return cmd, arg, line
python3.10.4/Lib/cmd.py
211
XX-Net
{ "docstring": "Parse the line into a command name and a string containing\n the arguments. Returns a tuple containing (command, args, line).\n 'command' and 'args' may be None if the line couldn't be parsed.\n ", "language": "en", "n_whitespaces": 54, "n_words": 32, "vocab_size": 24 }
66
Python
41
8198943edd73a363c266633e1aa5b2a9e9c9f526
cmd.py
221,346
15
129
parseline
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
203
0
56,361
14
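Because this record is the stock library cmd module, the splitting behaviour of parseline is easy to see directly.

import cmd

shell = cmd.Cmd()
print(shell.parseline("help topics"))  # ('help', 'topics', 'help topics')
print(shell.parseline("?topics"))      # '?' expands to the help command
print(shell.parseline("   "))          # blank input -> (None, None, '')
print(shell.parseline("!ls"))          # no do_shell defined -> (None, None, '!ls')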
11
39
def filter_queryset(self, request, queryset, view): fields = set(view.get_available_fields(queryset.model, db_fields_only=True)) # Locale is a database field, but we provide a separate filter for it if "locale" in fields: fields.remove("locale") for field_name, value in request.GET.items(): if field_name in fields: try: field = queryset.model._meta.get_field(field_name) except LookupError: field = None # Convert value into python try: if isinstance( field, (models.BooleanField, models.NullBooleanField) ): value = parse_boolean(value) elif isinstance(field, (models.IntegerField, models.AutoField)): value = int(value) elif isinstance(field, models.ForeignKey): value = field.target_field.get_prep_value(value) except ValueError as e: raise BadRequestError( "field filter error. '%s' is not a valid value for %s (%s)" % (value, field_name, str(e)) ) if isinstance(field, TaggableManager): for tag in value.split(","): queryset = queryset.filter(**{field_name + "__name": tag}) # Stick a message on the queryset to indicate that tag filtering has been performed # This will let the do_search method know that it must raise an error as searching # and tag filtering at the same time is not supported queryset._filtered_by_tag = True else: queryset = queryset.filter(**{field_name: value}) return queryset
wagtail/api/v2/filters.py
359
wagtail
{ "docstring": "\n This performs field level filtering on the result set\n Eg: ?title=James Joyce\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 12 }
162
Python
108
d10f15e55806c6944827d801cd9c2d53f5da4186
filters.py
72,652
31
220
filter_queryset
https://github.com/wagtail/wagtail.git
Reformat with black
758
0
15,909
21
1
4
def _generate_examples(self, **kwargs): raise NotImplementedError()
src/datasets/builder.py
24
datasets
{ "docstring": "Default function generating examples for each `SplitGenerator`.\n\n This function preprocess the examples from the raw data to the preprocessed\n dataset files.\n This function is called once for each `SplitGenerator` defined in\n `_split_generators`. The examples yielded here will be written on\n disk.\n\n Args:\n **kwargs (additional keyword arguments): Arguments forwarded from the SplitGenerator.gen_kwargs\n\n Yields:\n key: `str` or `int`, a unique deterministic example identification key.\n * Unique: An error will be raised if two examples are yield with the\n same key.\n * Deterministic: When generating the dataset twice, the same example\n should have the same key.\n Good keys can be the image id, or line number if examples are extracted\n from a text file.\n The key will be hashed and sorted to shuffle examples deterministically,\n such as generating the dataset multiple times keep examples in the\n same order.\n example: `dict<str feature_name, feature_value>`, a feature dictionary\n ready to be encoded and written to disk. The example will be\n encoded with `self.info.features.encode_example({...})`.\n ", "language": "en", "n_whitespaces": 419, "n_words": 157, "vocab_size": 98 }
5
Python
5
5669b8c8d75b8c3106abd23f21d902d1f020e25d
builder.py
105,068
2
13
_generate_examples
https://github.com/huggingface/datasets.git
Add missing kwargs to docstrings (#4446)
19
0
22,061
7
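A minimal sketch of what a concrete override of the hook above could look like for a one-record-per-line text file; the filepath keyword is whatever the matching SplitGenerator put into gen_kwargs, which is an assumption made for this sketch.

def _generate_examples(self, filepath=None):
    with open(filepath, encoding="utf-8") as f:
        for line_number, line in enumerate(f):
            # the line number is a cheap unique, deterministic key
            yield line_number, {"text": line.rstrip("\n")}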
2
8
def get_delayed_update_fields(self): self.extra_update_fields['emitted_events'] = self.event_ct if 'got an unexpected keyword argument' in self.extra_update_fields.get('result_traceback', ''): self.delay_update(result_traceback=ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE) return self.extra_update_fields
awx/main/tasks/callback.py
76
awx
{ "docstring": "Return finalized dict of all fields that should be saved along with the job status change", "language": "en", "n_whitespaces": 15, "n_words": 16, "vocab_size": 16 }
17
Python
17
452744b67e02823879e722fe574984a2d760ed60
callback.py
81,164
5
42
get_delayed_update_fields
https://github.com/ansible/awx.git
Delay update of artifacts and error fields until final job save (#11832) * Delay update of artifacts until final job save Save tracebacks from receptor module to callback object Move receptor traceback check up to be more logical Use new mock_me fixture to avoid DB call with me method Update the special runner message to the delay_update pattern * Move special runner message into post-processing of callback fields
56
0
17,165
10
3
33
def forward_train(self, feat, out_enc, targets, valid_ratios): tgt_embedding = self.embedding(targets) n, c_enc, h, w = out_enc.shape assert c_enc == self.dim_model _, c_feat, _, _ = feat.shape assert c_feat == self.dim_input _, len_q, c_q = tgt_embedding.shape assert c_q == self.dim_model assert len_q <= self.max_seq_len query, _ = self.sequence_layer(tgt_embedding) query = paddle.transpose(query, (0, 2, 1)) key = paddle.reshape(out_enc, [n, c_enc, h * w]) if self.encode_value: value = key else: value = paddle.reshape(feat, [n, c_feat, h * w]) # mask = None # if valid_ratios is not None: # mask = paddle.zeros(shape=[n, len_q, h, w], dtype='bool') # for i, valid_ratio in enumerate(valid_ratios): # valid_width = min(w, math.ceil(w * valid_ratio)) # if valid_width < w: # mask[i, :, :, valid_width:] = True # # mask = mask.view(n, h * w) # mask = paddle.reshape(mask, (n, len_q, h * w)) attn_out = self.attention_layer(query, key, value, h, w, valid_ratios) # attn_out = attn_out.permute(0, 2, 1).contiguous() attn_out = paddle.transpose(attn_out, (0, 2, 1)) if self.return_feature: return attn_out out = self.prediction(attn_out) return out
ppocr/modeling/heads/rec_robustscanner_head.py
300
PaddleOCR
{ "docstring": "\n Args:\n feat (Tensor): Tensor of shape :math:`(N, D_i, H, W)`.\n out_enc (Tensor): Encoder output of shape\n :math:`(N, D_m, H, W)`.\n targets (Tensor): a tensor of shape :math:`(N, T)`. Each element is the index of a\n character.\n valid_ratios (Tensor): valid length ratio of img.\n Returns:\n Tensor: A raw logit tensor of shape :math:`(N, T, C-1)` if\n ``return_feature=False``. Otherwise it would be the hidden feature\n before the prediction projection layer, whose shape is\n :math:`(N, T, D_m)`.\n ", "language": "en", "n_whitespaces": 214, "n_words": 74, "vocab_size": 50 }
163
Python
91
63484257442362057ab4ea4acd769d52d42da9f1
rec_robustscanner_head.py
23,815
22
200
forward_train
https://github.com/PaddlePaddle/PaddleOCR.git
add robustscanner
443
0
4,647
13
6
15
def normalize_path_patterns(patterns): patterns = [os.path.normcase(p) for p in patterns] dir_suffixes = {"%s*" % path_sep for path_sep in {"/", os.sep}} norm_patterns = [] for pattern in patterns: for dir_suffix in dir_suffixes: if pattern.endswith(dir_suffix): norm_patterns.append(pattern[: -len(dir_suffix)]) break else: norm_patterns.append(pattern) return norm_patterns
django/core/management/utils.py
141
django
{ "docstring": "Normalize an iterable of glob style patterns based on OS.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
39
Python
29
9c19aff7c7561e3a82978a272ecdaad40dda5c00
utils.py
204,719
12
86
normalize_path_patterns
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
123
0
50,851
18
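The function is importable on its own (Django installed but no settings needed), so its effect on a trailing "/*" is easy to check.

from django.core.management.utils import normalize_path_patterns

# os.path.normcase is a no-op on POSIX; on Windows it also lowercases and
# swaps separators, so the exact output is platform dependent.
print(normalize_path_patterns(["docs/*", "*.pyc", "build"]))
# expected on POSIX: ['docs', '*.pyc', 'build']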
2
4
def details(self): return self._details if self._details else self.og_exception.details()
jina/excepts.py
37
jina
{ "docstring": "\n :return: details of this exception\n ", "language": "en", "n_whitespaces": 20, "n_words": 5, "vocab_size": 5 }
8
Python
7
072a47a4fa97aca68203882e1ef809681a523097
excepts.py
12,341
2
22
details
https://github.com/jina-ai/jina.git
feat: better error messages when gateway can't connect to other deployment (#4677)
22
0
2,262
9
1
6
def bind(self, *args, **kwargs) -> DeploymentNode: raise NotImplementedError()
python/ray/serve/api.py
31
ray
{ "docstring": "Bind the provided arguments and return a DeploymentNode.\n\n The returned bound deployment can be deployed or bound to other\n deployments to create a multi-deployment application.\n ", "language": "en", "n_whitespaces": 46, "n_words": 25, "vocab_size": 22 }
8
Python
8
f646d3fc312f63a6cf3e59a00ae1b3d6ab40393a
api.py
146,461
7
18
bind
https://github.com/ray-project/ray.git
[serve] Add unimplemented interfaces for Deployment DAG APIs (#23125) Adds the following interfaces (without implementation, for discussion / approval): - `serve.Application` - `serve.DeploymentNode` - `serve.DeploymentMethodNode`, `serve.DAGHandle`, and `serve.drivers.PipelineDriver` - `serve.run` & `serve.build` In addition to these Python APIs, we will also support the following CLI commands: - `serve run [--blocking=true] my_file:my_node_or_app # Uses Ray client, blocking by default.` - `serve build my_file:my_node output_path.yaml` - `serve deploy [--blocking=false] # Uses REST API, non-blocking by default.` - `serve status [--watch=false] # Uses REST API, non-blocking by default.`
22
0
33,689
7
1
3
def _inflate_g(g, n): # TODO should this be a method of meijerg? # See: [L, page 150, equation (5)]
sympy/integrals/meijerint.py
17
sympy
{ "docstring": " Return C, h such that h is a G function of argument z**n and\n g = C*h. ", "language": "en", "n_whitespaces": 25, "n_words": 17, "vocab_size": 16 }
19
Python
18
e94a7b45d7b033ccbd57395dca28b654f875c54c
meijerint.py
198,410
8
118
_inflate_g
https://github.com/sympy/sympy.git
Improve loop performance
28
0
48,919
6
56
66
def paramToDict(place, parameters=None): testableParameters = OrderedDict() if place in conf.parameters and not parameters: parameters = conf.parameters[place] parameters = re.sub(r"&(\w{1,4});", r"%s\g<1>%s" % (PARAMETER_AMP_MARKER, PARAMETER_SEMICOLON_MARKER), parameters) if place == PLACE.COOKIE: splitParams = parameters.split(conf.cookieDel or DEFAULT_COOKIE_DELIMITER) else: splitParams = parameters.split(conf.paramDel or DEFAULT_GET_POST_DELIMITER) for element in splitParams: element = re.sub(r"%s(.+?)%s" % (PARAMETER_AMP_MARKER, PARAMETER_SEMICOLON_MARKER), r"&\g<1>;", element) parts = element.split("=") if len(parts) >= 2: parameter = urldecode(parts[0].replace(" ", "")) if not parameter: continue if conf.paramDel and conf.paramDel == '\n': parts[-1] = parts[-1].rstrip() condition = not conf.testParameter condition |= conf.testParameter is not None and parameter in conf.testParameter condition |= place == PLACE.COOKIE and len(intersect((PLACE.COOKIE,), conf.testParameter, True)) > 0 if condition: value = "=".join(parts[1:]) if parameter in (conf.base64Parameter or []): try: kb.base64Originals[parameter] = oldValue = value value = urldecode(value, convall=True) value = decodeBase64(value, binary=False, encoding=conf.encoding or UNICODE_ENCODING) parameters = re.sub(r"\b%s(\b|\Z)" % re.escape(oldValue), value, parameters) except: errMsg = "parameter '%s' does not contain " % parameter errMsg += "valid Base64 encoded value ('%s')" % value raise SqlmapValueException(errMsg) testableParameters[parameter] = value if not conf.multipleTargets and not (conf.csrfToken and re.search(conf.csrfToken, parameter, re.I)): _ = urldecode(testableParameters[parameter], convall=True) if (_.endswith("'") and _.count("'") == 1 or re.search(r'\A9{3,}', _) or re.search(r'\A-\d+\Z', _) or re.search(DUMMY_USER_INJECTION, _)) and not parameter.upper().startswith(GOOGLE_ANALYTICS_COOKIE_PREFIX): warnMsg = "it appears that you have provided tainted parameter values " warnMsg += "('%s') with most likely leftover " % element warnMsg += "chars/statements from manual SQL injection test(s). " warnMsg += "Please, always use only valid parameter values " warnMsg += "so sqlmap could be able to run properly" logger.warning(warnMsg) message = "are you really sure that you want to continue (sqlmap could have problems)? [y/N] " if not readInput(message, default='N', boolean=True): raise SqlmapSilentQuitException elif not _: warnMsg = "provided value for parameter '%s' is empty. " % parameter warnMsg += "Please, always use only valid parameter values " warnMsg += "so sqlmap could be able to run properly" logger.warning(warnMsg) if place in (PLACE.POST, PLACE.GET): for regex in (r"\A((?:<[^>]+>)+\w+)((?:<[^>]+>)+)\Z", r"\A([^\w]+.*\w+)([^\w]+)\Z"): match = re.search(regex, testableParameters[parameter]) if match: try: candidates = OrderedDict()
lib/core/common.py
868
def paramToDict(place, parameters=None): """ Split the parameters into names and values, check if these parameters are within the testable parameters and return in a dictionary. """ testableParameters = OrderedDict() if place in conf.parameters and not parameters: parameters = conf.parameters[place] parameters = re.sub(r"&(\w{1,4});", r"%s\g<1>%s" % (PARAMETER_AMP_MARKER, PARAMETER_SEMICOLON_MARKER), parameters) if place == PLACE.COOKIE: splitParams = parameters.split(conf.cookieDel or DEFAULT_COOKIE_DELIMITER) else: splitParams = parameters.split(conf.paramDel or DEFAULT_GET_POST_DELIMITER) for element in splitParams: element = re.sub(r"%s(.+?)%s" % (PARAMETER_AMP_MARKER, PARAMETER_SEMICOLON_MARKER), r"&\g<1>;", element) parts = element.split("=") if len(parts) >= 2: parameter = urldecode(parts[0].replace(" ", "")) if not parameter: continue if conf.paramDel and conf.paramDel == '\n': parts[-1] = parts[-1].rstrip() condition = not conf.testParameter condition |= conf.testParameter is not None and parameter in conf.testParameter condition |= place == PLACE.COOKIE and len(intersect((PLACE.COOKIE,), conf.testParameter, True)) > 0 if condition: value = "=".join(parts[1:]) if parameter in (conf.base64Parameter or []): try: kb.base64Originals[parameter] = oldValue = value value = urldecode(value, convall=True) value = decodeBase64(value, binary=False, encoding=conf.encoding or UNICODE_ENCODING) parameters = re.sub(r"\b%s(\b|\Z)" % re.escape(oldValue), value, parameters) except: errMsg = "parameter '%s' does not contain " % parameter errMsg += "valid Base64 encoded value ('%s')" % value raise SqlmapValueException(errMsg) testableParameters[parameter] = value if not conf.multipleTargets and not (conf.csrfToken and re.search(conf.csrfToken, parameter, re.I)): _ = urldecode(testableParameters[parameter], convall=True) if (_.endswith("'") and _.count("'") == 1 or re.search(r'\A9{3,}', _) or re.search(r'\A-\d+\Z', _) or re.search(DUMMY_USER_INJECTION, _)) and not parameter.upper().startswith(GOOGLE_ANALYTICS_COOKIE_PREFIX): warnMsg = "it appears that you have provided tainted parameter values " warnMsg += "('%s') with most likely leftover " % element warnMsg += "chars/statements from manual SQL injection test(s). " warnMsg += "Please, always use only valid parameter values " warnMsg += "so sqlmap could be able to run properly" logger.warning(warnMsg) message = "are you really sure that you want to continue (sqlmap could have problems)? [y/N] " if not readInput(message, default='N', boolean=True): raise SqlmapSilentQuitException elif not _: warnMsg = "provided value for parameter '%s' is empty. " % parameter warnMsg += "Please, always use only valid parameter values " warnMsg += "so sqlmap could be able to run properly" logger.warning(warnMsg) if place in (PLACE.POST, PLACE.GET): for regex in (r"\A((?:<[^>]+>)+\w+)((?:<[^>]+>)+)\Z", r"\A([^\w]+.*\w+)([^\w]+)\Z"): match = re.search(regex, testableParameters[parameter]) if match: try: candidates = OrderedDict()
sqlmap
{ "docstring": "\n Split the parameters into names and values, check if these parameters\n are within the testable parameters and return in a dictionary.\n ", "language": "en", "n_whitespaces": 31, "n_words": 21, "vocab_size": 17 }
325
Python
170
df4293473d2fb6e887e31522cab5aff95e201581
common.py
123,469
110
979
paramToDict
https://github.com/sqlmapproject/sqlmap.git
Fixing DeprecationWarning (logger.warn)
1,252
1
27,381
15
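The first re.sub above shields short HTML entities such as "&amp;" from the parameter splitter by temporarily rewriting "&...;" with marker strings, so that splitting on "&" only breaks on real parameter boundaries. A sketch of that step with stand-in marker values (the real PARAMETER_AMP_MARKER / PARAMETER_SEMICOLON_MARKER constants differ).

import re

AMP, SEMI = "__AMP__", "__SEMI__"   # stand-ins for the real marker constants
query = "q=a&amp;b&debug=1"
shielded = re.sub(r"&(\w{1,4});", r"%s\g<1>%s" % (AMP, SEMI), query)
print(shielded)                 # q=a__AMP__amp__SEMI__b&debug=1
print(shielded.split("&"))      # ['q=a__AMP__amp__SEMI__b', 'debug=1']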
19
29
def getcolor(self, color, image=None): if self.rawmode: raise ValueError("palette contains raw palette data") if isinstance(color, tuple): if self.mode == "RGB" and len(color) == 4: if color[3] == 255: color = color[:3] else: raise ValueError( "RGB ImagePalette can't handle non-opaque RGBA colors" ) elif self.mode == "RGBA": if len(color) == 3: color += (255,) try: return self.colors[color] except KeyError as e: # allocate new color slot if not isinstance(self.palette, bytearray): self._palette = bytearray(self.palette) index = len(self.palette) // 3 special_colors = () if image: special_colors = ( image.info.get("background"), image.info.get("transparency"), ) while index in special_colors: index += 1 if index >= 256: if image: # Search for an unused index for i, count in reversed(list(enumerate(image.histogram()))): if count == 0 and i not in special_colors: index = i break if index >= 256: raise ValueError("cannot allocate more than 256 colors") from e self.colors[color] = index if index * 3 < len(self.palette): self._palette = ( self.palette[: index * 3] + bytes(color) + self.palette[index * 3 + 3 :] ) else: self._palette += bytes(color) self.dirty = 1 return index else: raise ValueError(f"unknown color specifier: {repr(color)}")
src/PIL/ImagePalette.py
500
Pillow
{ "docstring": "Given an rgb tuple, allocate palette entry.\n\n .. warning:: This method is experimental.\n ", "language": "en", "n_whitespaces": 27, "n_words": 13, "vocab_size": 13 }
178
Python
102
f9a3178bb34e6b28bc46d42ef88f5069ebabde32
ImagePalette.py
243,502
49
299
getcolor
https://github.com/python-pillow/Pillow.git
Fix #6652: Handle translucent color used in RGB ImagePallete
1,055
0
70,038
22
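A quick check of the allocation behaviour described above, assuming a reasonably recent Pillow; the returned slot number may differ between versions.

from PIL import ImagePalette

palette = ImagePalette.ImagePalette("RGB")
idx = palette.getcolor((255, 0, 0))
# asking for the same colour again reuses the slot instead of allocating
assert palette.getcolor((255, 0, 0)) == idx
print(idx)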
7
38
def draw_masks(ax, img, masks, color=None, with_edge=True, alpha=0.8): taken_colors = set([0, 0, 0]) if color is None: random_colors = np.random.randint(0, 255, (masks.size(0), 3)) color = [tuple(c) for c in random_colors] color = np.array(color, dtype=np.uint8) polygons = [] for i, mask in enumerate(masks): if with_edge: contours, _ = bitmap_to_polygon(mask) polygons += [Polygon(c) for c in contours] color_mask = color[i] while tuple(color_mask) in taken_colors: color_mask = _get_bias_color(color_mask) taken_colors.add(tuple(color_mask)) mask = mask.astype(bool) img[mask] = img[mask] * (1 - alpha) + color_mask * alpha p = PatchCollection( polygons, facecolor='none', edgecolors='w', linewidths=1, alpha=0.8) ax.add_collection(p) return ax, img
mmdet/core/visualization/image.py
325
mmdetection
{ "docstring": "Draw masks on the image and their edges on the axes.\n\n Args:\n ax (matplotlib.Axes): The input axes.\n img (ndarray): The image with the shape of (3, h, w).\n masks (ndarray): The masks with the shape of (n, h, w).\n color (ndarray): The colors for each masks with the shape\n of (n, 3).\n with_edge (bool): Whether to draw edges. Default: True.\n alpha (float): Transparency of bounding boxes. Default: 0.8.\n\n Returns:\n matplotlib.Axes: The result axes.\n ndarray: The result image.\n ", "language": "en", "n_whitespaces": 153, "n_words": 77, "vocab_size": 47 }
91
Python
66
301d4a2d4cfe1cdb62608e2892924be3e67e3098
image.py
243,968
21
217
draw_masks
https://github.com/open-mmlab/mmdetection.git
[Feature] Support visualization for Panoptic Segmentation (#7041) * First commit of v2 * split the functions * Support to show panoptic result * temp * Support to show gt * support show gt * fix lint * Support to browse datasets * Fix unit tests * Fix findContours * fix comments * Fix pre-commit * fix lint * Add the type of an argument
218
0
70,170
13
9
19
def convert_to_experiment_list(experiments): exp_list = experiments # Transform list if necessary if experiments is None: exp_list = [] elif isinstance(experiments, Experiment): exp_list = [experiments] elif type(experiments) is dict: exp_list = [ Experiment.from_json(name, spec) for name, spec in experiments.items() ] # Validate exp_list if type(exp_list) is list and all(isinstance(exp, Experiment) for exp in exp_list): if len(exp_list) > 1: logger.info( "Running with multiple concurrent experiments. " "All experiments will be using the same SearchAlgorithm." ) else: raise TuneError("Invalid argument: {}".format(experiments)) return exp_list
python/ray/tune/experiment.py
188
ray
{ "docstring": "Produces a list of Experiment objects.\n\n Converts input from dict, single experiment, or list of\n experiments to list of experiments. If input is None,\n will return an empty list.\n\n Arguments:\n experiments (Experiment | list | dict): Experiments to run.\n\n Returns:\n List of experiments.\n ", "language": "en", "n_whitespaces": 75, "n_words": 43, "vocab_size": 32 }
79
Python
59
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
experiment.py
132,194
19
112
convert_to_experiment_list
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
214
0
29,694
13
1
23
def test_approx_iou_assigner_with_empty_boxes_and_gt(self): assigner = ApproxMaxIoUAssigner( pos_iou_thr=0.5, neg_iou_thr=0.5, ) bboxes = torch.empty((0, 4)) gt_bboxes = torch.empty((0, 4)) gt_labels = torch.LongTensor([]) pred_instances = InstanceData() pred_instances.priors = bboxes pred_instances.approxs = bboxes[:, None, :] gt_instances = InstanceData() gt_instances.bboxes = gt_bboxes gt_instances.labels = gt_labels assign_result = assigner.assign(pred_instances, gt_instances) self.assertEqual(len(assign_result.gt_inds), 0)
tests/test_core/test_bbox/test_assigners/test_approx_max_iou_assigner.py
178
mmdetection
{ "docstring": "Test corner case where an network might predict no boxes and no\n gt.", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 12 }
45
Python
29
bb7239ff635c4d9afd9c37a6e432251029aafb51
test_approx_max_iou_assigner.py
245,072
16
116
test_approx_iou_assigner_with_empty_boxes_and_gt
https://github.com/open-mmlab/mmdetection.git
Refactor SABL RetinaNet
165
0
70,644
10
1
20
def test_remove_not_installed(): name = "foo" list_pkgs_mock = MagicMock(return_value={}) cmd_mock = MagicMock( return_value={"pid": 12345, "retcode": 0, "stdout": "", "stderr": ""} ) salt_mock = { "cmd.run_all": cmd_mock, "lowpkg.version_cmp": rpm.version_cmp, "pkg_resource.parse_targets": MagicMock( return_value=({name: None}, "repository") ), } with patch.object(yumpkg, "list_pkgs", list_pkgs_mock), patch( "salt.utils.systemd.has_scope", MagicMock(return_value=False) ), patch.dict(yumpkg.__salt__, salt_mock): # Test yum with patch.dict(yumpkg.__context__, {"yum_bin": "yum"}), patch.dict( yumpkg.__grains__, {"os": "CentOS", "osrelease": 7} ): yumpkg.remove(name) cmd_mock.assert_not_called() # Test dnf yumpkg.__context__.pop("yum_bin") cmd_mock.reset_mock() with patch.dict(yumpkg.__context__, {"yum_bin": "dnf"}), patch.dict( yumpkg.__grains__, {"os": "Fedora", "osrelease": 27} ): yumpkg.remove(name) cmd_mock.assert_not_called()
tests/pytests/unit/modules/test_yumpkg.py
379
salt
{ "docstring": "\n Tests that no exception raised on removing not installed package\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
78
Python
60
8ea5342cbde034383938e244cdb16a0bf8a777e8
test_yumpkg.py
216,118
28
212
test_remove_not_installed
https://github.com/saltstack/salt.git
Fix exception in yumpkg.remove for not installed package
280
0
54,409
14
10
25
def forward(self, tgt, memory, tgt_mask=None, memory_mask=None, cache=None): r tgt_mask = _convert_attention_mask(tgt_mask, tgt.dtype) memory_mask = _convert_attention_mask(memory_mask, memory.dtype) residual = tgt if self.normalize_before: tgt = self.norm1(tgt) if cache is None: tgt = self.self_attn(tgt, tgt, tgt, tgt_mask, None) else: tgt, incremental_cache = self.self_attn(tgt, tgt, tgt, tgt_mask, cache[0]) tgt = residual + self.dropout1(tgt) if not self.normalize_before: tgt = self.norm1(tgt) residual = tgt if self.normalize_before: tgt = self.norm2(tgt) if cache is None: tgt = self.cross_attn(tgt, memory, memory, memory_mask, None) else: tgt, static_cache = self.cross_attn(tgt, memory, memory, memory_mask, cache[1]) tgt = residual + self.dropout2(tgt) if not self.normalize_before: tgt = self.norm2(tgt) residual = tgt if self.normalize_before: tgt = self.norm3(tgt) tgt = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) tgt = residual + self.dropout3(tgt) if not self.normalize_before: tgt = self.norm3(tgt) return tgt if cache is None else (tgt, (incremental_cache, static_cache))
examples/model_interpretation/task/transformer.py
437
PaddleNLP
{ "docstring": "\n Applies a Transformer decoder layer on the input.\n\n Parameters:\n tgt (Tensor): The input of Transformer decoder layer. It is a tensor\n with shape `[batch_size, target_length, d_model]`. The data type\n should be float32 or float64.\n memory (Tensor): The output of Transformer encoder. It is a tensor\n with shape `[batch_size, source_length, d_model]`. The data type\n should be float32 or float64.\n tgt_mask (Tensor, optional): A tensor used in self attention\n to prevents attention to some unwanted positions, usually the\n the subsequent positions. It is a tensor with shape broadcasted\n to `[batch_size, n_head, target_length, target_length]`.\n When the data type is bool, the unwanted positions have `False` \n values and the others have `True` values. When the data type is \n int, the unwanted positions have 0 values and the others have 1 \n values. When the data type is float, the unwanted positions have \n `-INF` values and the others have 0 values. It can be None when \n nothing wanted or needed to be prevented attention to. Default None.\n memory_mask (Tensor, optional): A tensor used in decoder-encoder\n cross attention to prevents attention to some unwanted positions,\n usually the paddings. It is a tensor with shape broadcasted to \n `[batch_size, n_head, target_length, source_length]`. When the \n data type is bool, the unwanted positions have `False` values \n and the others have `True` values. When the data type is int, \n the unwanted positions have 0 values and the others have 1 \n values. When the data type is float, the unwanted positions have \n `-INF` values and the others have 0 values. It can be None when \n nothing wanted or needed to be prevented attention to. Default None.\n cache (tuple, optional): It is a tuple( :code:`(incremental_cache, static_cache)` ),\n `incremental_cache` is an instance of `MultiHeadAttention.Cache`,\n `static_cache` is an instance of `MultiHeadAttention.StaticCache.\n See `TransformerDecoderLayer.gen_cache` for more details. It is\n only used for inference and should be None for training. Default\n None.\n\n Returns:\n Tensor|tuple: It is a tensor that has the same shape and data type \\\n as `tgt`, representing the output of Transformer decoder layer. \\\n Or a tuple if `cache` is not None, except for decoder layer output, \\\n the tuple includes the new cache which is same as input `cache` \\\n argument but `incremental_cache` in it has an incremental length. \\\n See `MultiHeadAttention.gen_cache` and `MultiHeadAttention.forward` \\\n for more details.\n ", "language": "en", "n_whitespaces": 976, "n_words": 374, "vocab_size": 130 }
126
Python
47
93cae49c0c572b5c1ac972759140fbe924b0374d
transformer.py
323,000
80
290
forward
https://github.com/PaddlePaddle/PaddleNLP.git
Add NLP model interpretation (#1752) * upload NLP interpretation * fix problems and relocate project * remove abandoned picture * remove abandoned picture * fix dead link in README * fix dead link in README * fix code style problems * fix CR round 1 * remove .gitkeep files * fix code style * fix file encoding problem * fix code style * delete duplicated files due to directory rebuild * fix CR round 2 * fix code style * fix ernie tokenizer * fix code style * fix problem from CR round 1 * fix bugs * fix README * remove duplicated files * deal with diff of old and new tokenizer results * fix CR round 4 * fix code style * add missing dependence * fix broken import path * move some data file to cloud * MRC upper case to lower case Co-authored-by: Zeyu Chen <[email protected]> Co-authored-by: binlinquge <xxx> Co-authored-by: Guo Sheng <[email protected]>
527
0
118,328
14
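The method above applies the same wrapper three times (self-attention, cross-attention, feed-forward): optionally pre-normalize, apply the sub-layer, add the residual, optionally post-normalize. A framework-free sketch of that wrapper, with dummy callables standing in for the real sub-layers.

def sublayer(x, fn, norm, dropout, normalize_before):
    residual = x
    if normalize_before:
        x = norm(x)
    x = residual + dropout(fn(x))
    if not normalize_before:
        x = norm(x)
    return x

# identity norm/dropout keep the arithmetic easy to follow
out = sublayer(1.0, fn=lambda v: v * 2, norm=lambda v: v,
               dropout=lambda v: v, normalize_before=True)
print(out)  # 3.0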
2
10
def forgiving_int(value, default=_SENTINEL, base=10): result = jinja2.filters.do_int(value, default=default, base=base) if result is _SENTINEL: raise_no_default("int", value) return result
homeassistant/helpers/template.py
71
core
{ "docstring": "Try to convert value to an int, and raise if it fails.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
17
Python
15
4885331509eeffe50f42d76b234996467b06170f
template.py
300,603
5
45
forgiving_int
https://github.com/home-assistant/core.git
Fail template functions when no default specified (#71687)
36
0
99,463
10
36
55
def norm(x, ord=None, axis=None, keepdims=False): x = asarray(x) if not issubclass(x.dtype.type, (inexact, object_)): x = x.astype(float) # Immediately handle some default, simple, fast, and common cases. if axis is None: ndim = x.ndim if ((ord is None) or (ord in ('f', 'fro') and ndim == 2) or (ord == 2 and ndim == 1)): x = x.ravel(order='K') if isComplexType(x.dtype.type): sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag) else: sqnorm = dot(x, x) ret = sqrt(sqnorm) if keepdims: ret = ret.reshape(ndim*[1]) return ret # Normalize the `axis` argument to a tuple. nd = x.ndim if axis is None: axis = tuple(range(nd)) elif not isinstance(axis, tuple): try: axis = int(axis) except Exception as e: raise TypeError("'axis' must be None, an integer or a tuple of integers") from e axis = (axis,) if len(axis) == 1: if ord == Inf: return abs(x).max(axis=axis, keepdims=keepdims) elif ord == -Inf: return abs(x).min(axis=axis, keepdims=keepdims) elif ord == 0: # Zero norm return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims) elif ord == 1: # special case for speedup return add.reduce(abs(x), axis=axis, keepdims=keepdims) elif ord is None or ord == 2: # special case for speedup s = (x.conj() * x).real return sqrt(add.reduce(s, axis=axis, keepdims=keepdims)) # None of the str-type keywords for ord ('fro', 'nuc') # are valid for vectors elif isinstance(ord, str): raise ValueError(f"Invalid norm order '{ord}' for vectors") else: absx = abs(x) absx **= ord ret = add.reduce(absx, axis=axis, keepdims=keepdims) ret **= reciprocal(ord, dtype=ret.dtype) return ret elif len(axis) == 2: row_axis, col_axis = axis row_axis = normalize_axis_index(row_axis, nd) col_axis = normalize_axis_index(col_axis, nd) if row_axis == col_axis: raise ValueError('Duplicate axes given.') if ord == 2: ret = _multi_svd_norm(x, row_axis, col_axis, amax) elif ord == -2: ret = _multi_svd_norm(x, row_axis, col_axis, amin) elif ord == 1: if col_axis > row_axis: col_axis -= 1 ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis) elif ord == Inf: if row_axis > col_axis: row_axis -= 1 ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis) elif ord == -1: if col_axis > row_axis: col_axis -= 1 ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis) elif ord == -Inf: if row_axis > col_axis: row_axis -= 1 ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis) elif ord in [None, 'fro', 'f']: ret = sqrt(add.reduce((x.conj() * x).real, axis=axis)) elif ord == 'nuc': ret = _multi_svd_norm(x, row_axis, col_axis, sum) else: raise ValueError("Invalid norm order for matrices.") if keepdims: ret_shape = list(x.shape) ret_shape[axis[0]] = 1 ret_shape[axis[1]] = 1 ret = ret.reshape(ret_shape) return ret else: raise ValueError("Improper number of dimensions to norm.") # multi_dot
numpy/linalg/linalg.py
1,183
numpy
{ "docstring": "\n Matrix or vector norm.\n\n This function is able to return one of eight different matrix norms,\n or one of an infinite number of vector norms (described below), depending\n on the value of the ``ord`` parameter.\n\n Parameters\n ----------\n x : array_like\n Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord`\n is None. If both `axis` and `ord` are None, the 2-norm of\n ``x.ravel`` will be returned.\n ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional\n Order of the norm (see table under ``Notes``). inf means numpy's\n `inf` object. The default is None.\n axis : {None, int, 2-tuple of ints}, optional.\n If `axis` is an integer, it specifies the axis of `x` along which to\n compute the vector norms. If `axis` is a 2-tuple, it specifies the\n axes that hold 2-D matrices, and the matrix norms of these matrices\n are computed. If `axis` is None then either a vector norm (when `x`\n is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default\n is None.\n\n .. versionadded:: 1.8.0\n\n keepdims : bool, optional\n If this is set to True, the axes which are normed over are left in the\n result as dimensions with size one. With this option the result will\n broadcast correctly against the original `x`.\n\n .. versionadded:: 1.10.0\n\n Returns\n -------\n n : float or ndarray\n Norm of the matrix or vector(s).\n\n See Also\n --------\n scipy.linalg.norm : Similar function in SciPy.\n\n Notes\n -----\n For values of ``ord < 1``, the result is, strictly speaking, not a\n mathematical 'norm', but it may still be useful for various numerical\n purposes.\n\n The following norms can be calculated:\n\n ===== ============================ ==========================\n ord norm for matrices norm for vectors\n ===== ============================ ==========================\n None Frobenius norm 2-norm\n 'fro' Frobenius norm --\n 'nuc' nuclear norm --\n inf max(sum(abs(x), axis=1)) max(abs(x))\n -inf min(sum(abs(x), axis=1)) min(abs(x))\n 0 -- sum(x != 0)\n 1 max(sum(abs(x), axis=0)) as below\n -1 min(sum(abs(x), axis=0)) as below\n 2 2-norm (largest sing. value) as below\n -2 smallest singular value as below\n other -- sum(abs(x)**ord)**(1./ord)\n ===== ============================ ==========================\n\n The Frobenius norm is given by [1]_:\n\n :math:`||A||_F = [\\\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`\n\n The nuclear norm is the sum of the singular values.\n\n Both the Frobenius and nuclear norm orders are only defined for\n matrices and raise a ValueError when ``x.ndim != 2``.\n\n References\n ----------\n .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,\n Baltimore, MD, Johns Hopkins University Press, 1985, pg. 
15\n\n Examples\n --------\n >>> from numpy import linalg as LA\n >>> a = np.arange(9) - 4\n >>> a\n array([-4, -3, -2, ..., 2, 3, 4])\n >>> b = a.reshape((3, 3))\n >>> b\n array([[-4, -3, -2],\n [-1, 0, 1],\n [ 2, 3, 4]])\n\n >>> LA.norm(a)\n 7.745966692414834\n >>> LA.norm(b)\n 7.745966692414834\n >>> LA.norm(b, 'fro')\n 7.745966692414834\n >>> LA.norm(a, np.inf)\n 4.0\n >>> LA.norm(b, np.inf)\n 9.0\n >>> LA.norm(a, -np.inf)\n 0.0\n >>> LA.norm(b, -np.inf)\n 2.0\n\n >>> LA.norm(a, 1)\n 20.0\n >>> LA.norm(b, 1)\n 7.0\n >>> LA.norm(a, -1)\n -4.6566128774142013e-010\n >>> LA.norm(b, -1)\n 6.0\n >>> LA.norm(a, 2)\n 7.745966692414834\n >>> LA.norm(b, 2)\n 7.3484692283495345\n\n >>> LA.norm(a, -2)\n 0.0\n >>> LA.norm(b, -2)\n 1.8570331885190563e-016 # may vary\n >>> LA.norm(a, 3)\n 5.8480354764257312 # may vary\n >>> LA.norm(a, -3)\n 0.0\n\n Using the `axis` argument to compute vector norms:\n\n >>> c = np.array([[ 1, 2, 3],\n ... [-1, 1, 4]])\n >>> LA.norm(c, axis=0)\n array([ 1.41421356, 2.23606798, 5. ])\n >>> LA.norm(c, axis=1)\n array([ 3.74165739, 4.24264069])\n >>> LA.norm(c, ord=1, axis=1)\n array([ 6., 6.])\n\n Using the `axis` argument to compute matrix norms:\n\n >>> m = np.arange(8).reshape(2,2,2)\n >>> LA.norm(m, axis=(1,2))\n array([ 3.74165739, 11.22497216])\n >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])\n (3.7416573867739413, 11.224972160321824)\n\n ", "language": "en", "n_whitespaces": 1267, "n_words": 581, "vocab_size": 322 }
395
Python
178
0d13f9f747887b290108a909dd92c3cb47239921
linalg.py
160,203
87
746
norm
https://github.com/numpy/numpy.git
BUG: Consistent promotion for norm for all values of ord (#17709) Previously, numpy.linalg.norm would return values with the same floating-point type as input arrays for most values of the ``ord`` parameter, but not all. This PR fixes this so that the output dtype matches the input for all (valid) values of ``ord``. Co-authored-by: Kenichi Maehashi <[email protected]> Co-authored-by: Ross Barnowski <[email protected]>
1,233
0
38,571
21
1
38
def test_multiple_tags_return_distinct_objects_with_seperate_config_contexts(self): site = Site.objects.first() platform = Platform.objects.first() tenant = Tenant.objects.first() tag1, tag2 = list(Tag.objects.all()) tag_context_1 = ConfigContext.objects.create( name="tag-1", weight=100, data={ "tag": 1 } ) tag_context_1.tags.add(tag1) tag_context_2 = ConfigContext.objects.create( name="tag-2", weight=100, data={ "tag": 1 } ) tag_context_2.tags.add(tag2) device = Device.objects.create( name="Device 3", site=site, tenant=tenant, platform=platform, device_role=DeviceRole.objects.first(), device_type=DeviceType.objects.first() ) device.tags.set([tag1, tag2]) annotated_queryset = Device.objects.filter(name=device.name).annotate_config_context_data() self.assertEqual(ConfigContext.objects.get_for_object(device).count(), 2) self.assertEqual(device.get_config_context(), annotated_queryset[0].get_config_context())
netbox/extras/tests/test_models.py
358
netbox
{ "docstring": "\n Tagged items use a generic relationship, which results in duplicate rows being returned when queried.\n This is combatted by by appending distinct() to the config context querysets. This test creates a config\n context assigned to two tags and ensures objects related by those same two tags result in only a single\n config context record being returned.\n\n This test case is seperate from the above in that it deals with multiple config context objects in play.\n\n See https://github.com/netbox-community/netbox/issues/5387\n ", "language": "en", "n_whitespaces": 127, "n_words": 77, "vocab_size": 54 }
57
Python
42
d4a231585ac9a25d9739552d8c9e433dbf9398af
test_models.py
266,200
33
223
test_multiple_tags_return_distinct_objects_with_seperate_config_contexts
https://github.com/netbox-community/netbox.git
Clean up tests
360
0
78,334
12
1
11
def test_recorder_pool(caplog):
    engine = create_engine("sqlite://", poolclass=RecorderPool)
    get_session = sessionmaker(bind=engine)
    shutdown = False
    connections = []
tests/components/recorder/test_pool.py
55
core
{ "docstring": "Test RecorderPool gives the same connection in the creating thread.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
15
Python
12
bc862e97ed68cce8c437327651f85892787e755e
test_pool.py
293,727
34
234
test_recorder_pool
https://github.com/home-assistant/core.git
Use a dedicated executor pool for database operations (#68105) Co-authored-by: Erik Montnemery <[email protected]> Co-authored-by: Franck Nijhof <[email protected]>
30
0
92,782
9
8
37
def apply_infer_dtype(func, args, kwargs, funcname, suggest_dtype="dtype", nout=None):
    from dask.array.utils import meta_from_array

    # make sure that every arg is an evaluated array
    args = [
        np.ones_like(meta_from_array(x), shape=((1,) * x.ndim), dtype=x.dtype)
        if is_arraylike(x)
        else x
        for x in args
    ]
    try:
        with np.errstate(all="ignore"):
            o = func(*args, **kwargs)
    except Exception as e:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        tb = "".join(traceback.format_tb(exc_traceback))
        suggest = (
            (
                "Please specify the dtype explicitly using the "
                "`{dtype}` kwarg.\n\n".format(dtype=suggest_dtype)
            )
            if suggest_dtype
            else ""
        )
        msg = (
            f"`dtype` inference failed in `{funcname}`.\n\n"
            f"{suggest}"
            "Original error is below:\n"
            "------------------------\n"
            f"{e!r}\n\n"
            "Traceback:\n"
            "---------\n"
            f"{tb}"
        )
    else:
        msg = None

    if msg is not None:
        raise ValueError(msg)

    return o.dtype if nout is None else tuple(e.dtype for e in o)
dask/array/core.py
341
dask
{ "docstring": "\n Tries to infer output dtype of ``func`` for a small set of input arguments.\n\n Parameters\n ----------\n func: Callable\n Function for which output dtype is to be determined\n\n args: List of array like\n Arguments to the function, which would usually be used. Only attributes\n ``ndim`` and ``dtype`` are used.\n\n kwargs: dict\n Additional ``kwargs`` to the ``func``\n\n funcname: String\n Name of calling function to improve potential error messages\n\n suggest_dtype: None/False or String\n If not ``None`` adds suggestion to potential error message to specify a dtype\n via the specified kwarg. Defaults to ``'dtype'``.\n\n nout: None or Int\n ``None`` if function returns single output, integer if many.\n Deafults to ``None``.\n\n Returns\n -------\n : dtype or List of dtype\n One or many dtypes (depending on ``nout``)\n ", "language": "en", "n_whitespaces": 231, "n_words": 121, "vocab_size": 86 }
119
Python
92
cccb9d8d8e33a891396b1275c2448c352ef40c27
core.py
156,008
37
192
apply_infer_dtype
https://github.com/dask/dask.git
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
413
0
36,503
16
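A simplified, self-contained sketch of the idea behind apply_infer_dtype: run the function once on one-element stand-ins that share the real inputs' ndim and dtype, then read the dtype off the tiny result. The helper name and the inputs below are illustrative, not Dask's API.

import numpy as np

def infer_dtype(func, *arrays):
    # One-element probes with the same ndim/dtype as the real inputs.
    probes = [np.ones((1,) * a.ndim, dtype=a.dtype) for a in arrays]
    with np.errstate(all="ignore"):
        out = func(*probes)
    return out.dtype

a = np.zeros((1000, 1000), dtype=np.float32)
b = np.zeros((1000, 1000), dtype=np.int16)
print(infer_dtype(np.add, a, b))  # float32, without computing the full result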
2
8
def _cursor(self) -> Optional[Cursor]:
    if self._provided_cursor is None:
        return cursor.get_container_cursor(self._root_container)
    else:
        return self._provided_cursor
lib/streamlit/delta_generator.py
55
streamlit
{ "docstring": "Return our Cursor. This will be None if we're not running in a\n ScriptThread - e.g., if we're running a \"bare\" script outside of\n Streamlit.\n ", "language": "en", "n_whitespaces": 46, "n_words": 25, "vocab_size": 21 }
13
Python
11
704eab3478cf69847825b23dabf15813a8ac9fa2
delta_generator.py
118,556
9
33
_cursor
https://github.com/streamlit/streamlit.git
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
56
0
26,289
10
3
10
def _mysql_tables_where_indexes_already_present(conn):
    to_check = [
        ('xcom', 'idx_xcom_task_instance'),
        ('task_reschedule', 'idx_task_reschedule_dag_run'),
        ('task_fail', 'idx_task_fail_task_instance'),
    ]
    tables = set()
    for tbl, idx in to_check:
        if conn.execute(f"show indexes from {tbl} where Key_name = '{idx}'").first():
            tables.add(tbl)
    return tables
airflow/migrations/versions/0111_2_3_3_add_indexes_for_cascade_deletes.py
115
airflow
{ "docstring": "\n If user downgraded and is upgrading again, we have to check for existing\n indexes on mysql because we can't (and don't) drop them as part of the\n downgrade.\n ", "language": "en", "n_whitespaces": 41, "n_words": 28, "vocab_size": 27 }
32
Python
29
677c42227c08f705142f298ab88915f133cd94e5
0111_2_3_3_add_indexes_for_cascade_deletes.py
43,184
11
61
_mysql_tables_where_indexes_already_present
https://github.com/apache/airflow.git
Add indexes for CASCADE deletes for task_instance (#24488) When we add foreign keys with ON DELETE CASCADE, and we delete rows in the foreign table, the database needs to join back to the referencing table. If there's no suitable index, then it can be slow to perform the deletes.
89
0
7,856
13
1
5
def fit(self, X, y=None):
    # param validation is done in fit_transform
    self.fit_transform(X)
    return self
sklearn/decomposition/_truncated_svd.py
34
scikit-learn
{ "docstring": "Fit model on training data X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training data.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the transformer object.\n ", "language": "en", "n_whitespaces": 129, "n_words": 40, "vocab_size": 37 }
14
Python
14
7da7ba603d42398c6e7cf89ea5336b8aabac7bae
_truncated_svd.py
260,585
3
20
fit
https://github.com/scikit-learn/scikit-learn.git
MNT TrucatedSVD uses _validate_parameters (#23987) Co-authored-by: jeremiedbb <[email protected]>
42
0
76,359
7
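For context, a typical call sequence for this estimator (standard scikit-learn usage; the random data and component count are assumptions):

import numpy as np
from sklearn.decomposition import TruncatedSVD

X = np.random.RandomState(0).rand(100, 20)
svd = TruncatedSVD(n_components=5)
svd.fit(X)                       # delegates the work to fit_transform, as above
X_reduced = svd.fit_transform(X)
print(X_reduced.shape)           # (100, 5)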
2
16
def apply_channel_shift(x, intensity, channel_axis=0):
    x = np.rollaxis(x, channel_axis, 0)
    min_x, max_x = np.min(x), np.max(x)
    channel_images = [
        np.clip(x_channel + intensity, min_x, max_x) for x_channel in x]
    x = np.stack(channel_images, axis=0)
    x = np.rollaxis(x, 0, channel_axis + 1)
    return x


@keras_export('keras.preprocessing.image.random_channel_shift')
keras/preprocessing/image.py
144
@keras_export('keras.preprocessing.image.random_channel_shift')
keras
{ "docstring": "Performs a channel shift.\n\n Args:\n x: Input tensor. Must be 3D.\n intensity: Transformation intensity.\n channel_axis: Index of axis for channels in the input tensor.\n\n Returns:\n Numpy image tensor.\n ", "language": "en", "n_whitespaces": 51, "n_words": 28, "vocab_size": 26 }
40
Python
29
373ad97c72ed1ac4b6898e85b2cfd7b016e4b469
image.py
268,943
8
89
apply_channel_shift
https://github.com/keras-team/keras.git
Copy image utils from keras_preprocessing directly into core keras This is not new code, we are just moving these utilities directly into keras from keras-preprocessing. For the library code, just fixed linting errors. For the test code, had to do more major changes to port from pytest, but hopefully any errors have been caught by the tests themselves. PiperOrigin-RevId: 427274651
51
1
79,775
10
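A small usage sketch for the helper above; the import path follows the record's module, and the random image and intensity are assumptions.

import numpy as np
from keras.preprocessing.image import apply_channel_shift

img = np.random.RandomState(0).rand(3, 32, 32)   # channels-first image in [0, 1]
shifted = apply_channel_shift(img, intensity=0.2, channel_axis=0)
print(shifted.shape)                             # (3, 32, 32)
# Values are clipped to the original array's overall min/max.
print(shifted.min() >= img.min(), shifted.max() <= img.max())  # True True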
1
19
def _requeue_trial(self, trial):
    self._scheduler_alg.on_trial_error(self, trial)
    self.trial_executor.set_status(trial, Trial.PENDING)

    # TODO(rliaw): Right now, this pushes the trial to the end of queue
    # because restoration can be expensive. However, this is not
    # ideal since it just hides the issue - a better fix would
    # be to use an actor table to detect the IP of the Trainable
    # and rsync the files there.
    # See https://github.com/ray-project/ray/issues/5168
    self._trials.pop(self._trials.index(trial))
    self._trials.append(trial)
    self._live_trials.add(trial)

    with warn_if_slow("scheduler.on_trial_add"):
        self._scheduler_alg.on_trial_add(
            TrialRunnerWrapper(self, runner_whitelist_attr={"search_alg"}), trial
        )
python/ray/tune/trial_runner.py
148
ray
{ "docstring": "Notification to TrialScheduler and requeue trial.\n\n This does not notify the SearchAlgorithm because the function\n evaluation is still in progress.\n\n ", "language": "en", "n_whitespaces": 41, "n_words": 20, "vocab_size": 19 }
76
Python
60
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
trial_runner.py
132,862
10
86
_requeue_trial
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
204
0
29,841
14
1
43
def test_room_state_returned_when_knocking(self):
    user_id = self.register_user("u1", "you the one")
    user_token = self.login("u1", "you the one")

    fake_knocking_user_id = "@user:other.example.com"

    # Create a room with a room version that includes knocking
    room_id = self.helper.create_room_as(
        "u1",
        is_public=False,
        room_version=RoomVersions.V7.identifier,
        tok=user_token,
    )

    # Update the join rules and add additional state to the room to check for later
    expected_room_state = self.send_example_state_events_to_room(
        self.hs, room_id, user_id
    )

    channel = self.make_signed_federation_request(
        "GET",
        "/_matrix/federation/v1/make_knock/%s/%s?ver=%s"
        % (
            room_id,
            fake_knocking_user_id,
            # Inform the remote that we support the room version of the room we're
            # knocking on
            RoomVersions.V7.identifier,
        ),
    )
    self.assertEquals(200, channel.code, channel.result)

    # Note: We don't expect the knock membership event to be sent over federation as
    # part of the stripped room state, as the knocking homeserver already has that
    # event. It is only done for clients during /sync

    # Extract the generated knock event json
    knock_event = channel.json_body["event"]

    # Check that the event has things we expect in it
    self.assertEquals(knock_event["room_id"], room_id)
    self.assertEquals(knock_event["sender"], fake_knocking_user_id)
    self.assertEquals(knock_event["state_key"], fake_knocking_user_id)
    self.assertEquals(knock_event["type"], EventTypes.Member)
    self.assertEquals(knock_event["content"]["membership"], Membership.KNOCK)

    # Turn the event json dict into a proper event.
    # We won't sign it properly, but that's OK as we stub out event auth in `prepare`
    signed_knock_event = builder.create_local_event_from_event_dict(
        self.clock,
        self.hs.hostname,
        self.hs.signing_key,
        room_version=RoomVersions.V7,
        event_dict=knock_event,
    )

    # Convert our proper event back to json dict format
    signed_knock_event_json = signed_knock_event.get_pdu_json(
        self.clock.time_msec()
    )

    # Send the signed knock event into the room
    channel = self.make_signed_federation_request(
        "PUT",
        "/_matrix/federation/v1/send_knock/%s/%s"
        % (room_id, signed_knock_event.event_id),
        signed_knock_event_json,
    )
    self.assertEquals(200, channel.code, channel.result)

    # Check that we got the stripped room state in return
    room_state_events = channel.json_body["knock_state_events"]

    # Validate the stripped room state events
    self.check_knock_room_state_against_room_state(
        room_state_events, expected_room_state
    )
tests/federation/transport/test_knocking.py
454
synapse
{ "docstring": "\n Tests that specific, stripped state events from a room are returned after\n a remote homeserver successfully knocks on a local room.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 19 }
259
Python
149
c3db7a0b59d48b8872bc24096f9a2467ef35f703
test_knocking.py
246,318
50
276
test_room_state_returned_when_knocking
https://github.com/matrix-org/synapse.git
Tests: replace mocked Authenticator with the real thing (#11913) If we prepopulate the test homeserver with a key for a remote homeserver, we can make federation requests to it without having to stub out the authenticator. This has two advantages: * means that what we are testing is closer to reality (ie, we now have complete tests for the incoming-request-authorisation flow) * some tests require that other objects be signed by the remote server (eg, the event in `/send_join`), and doing that would require a whole separate set of mocking out. It's much simpler just to use real keys.
834
0
71,151
12
3
7
def should_overwrite(filepath, overwrite):
    # If file exists and should not be overwritten.
    if not overwrite and os.path.isfile(filepath):
        return ask_to_proceed_with_overwrite(filepath)
    return True
keras/saving/saving_utils.py
48
keras
{ "docstring": "Returns whether the filepath should be overwritten.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
21
Python
18
84afc5193d38057e2e2badf9c889ea87d80d8fbf
saving_utils.py
276,251
4
28
should_overwrite
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
40
0
81,607
9
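A hedged sketch of a call site for this helper; the path and the save call are hypothetical.

if should_overwrite("model.h5", overwrite=False):
    # Either "model.h5" does not exist yet, or the user confirmed the overwrite.
    model.save("model.h5")  # `model` is assumed to be an existing Keras model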
17
14
def make_system(A, M, x0, b):
    A_ = A
    A = aslinearoperator(A)

    if A.shape[0] != A.shape[1]:
        raise ValueError(f'expected square matrix, but got shape={(A.shape,)}')

    N = A.shape[0]

    b = asanyarray(b)

    if not (b.shape == (N,1) or b.shape == (N,)):
        raise ValueError(f'shapes of A {A.shape} and b {b.shape} are '
                         'incompatible')

    if b.dtype.char not in 'fdFD':
        b = b.astype('d')  # upcast non-FP types to double
scipy/sparse/linalg/_isolve/utils.py
194
scipy
{ "docstring": "Make a linear system Ax=b\n\n Parameters\n ----------\n A : LinearOperator\n sparse or dense matrix (or any valid input to aslinearoperator)\n M : {LinearOperator, Nones}\n preconditioner\n sparse or dense matrix (or any valid input to aslinearoperator)\n x0 : {array_like, str, None}\n initial guess to iterative method.\n ``x0 = 'Mb'`` means using the nonzero initial guess ``M @ b``.\n Default is `None`, which means using the zero initial guess.\n b : array_like\n right hand side\n\n Returns\n -------\n (A, M, x, b, postprocess)\n A : LinearOperator\n matrix of the linear system\n M : LinearOperator\n preconditioner\n x : rank 1 ndarray\n initial guess\n b : rank 1 ndarray\n right hand side\n postprocess : function\n converts the solution vector to the appropriate\n type and dimensions (e.g. (N,1) matrix)\n\n ", "language": "en", "n_whitespaces": 303, "n_words": 123, "vocab_size": 77 }
62
Python
48
5628849933f1ba002f34b88b4d3af24f68008b39
utils.py
241,795
51
379
make_system
https://github.com/scipy/scipy.git
MAINT: sparse.linalg: Remove unnecessary operations
132
0
69,699
13
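make_system is an internal helper used by SciPy's iterative solvers; a typical end-user call that exercises it looks like the sketch below (the small SPD system is an assumption).

import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import cg

A = diags([2.0, 2.0, 2.0, 2.0])     # 4x4 matrix with 2.0 on the diagonal
b = np.ones(4)
x, info = cg(A, b)                  # cg() validates A and b via make_system internally
print(info, np.allclose(A @ x, b))  # 0 True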
3
11
def news(xml_news_url, counter):
Google_News.py
23
"""Print select details from a html response containing xmla html response containing
Python
{ "docstring": "Print select details from a html response containing xml", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
3
Python
3
f0af0c43340763724f139fa68aa1e5a9ffe458b4
Google_News.py
22,452
16
95
news
https://github.com/geekcomputers/Python.git
refactor: clean code Signed-off-by: slowy07 <[email protected]>
6
2
4,346
5
4
16
def _estimator_with_converted_arrays(estimator, converter):
    from sklearn.base import clone

    new_estimator = clone(estimator)
    for key, attribute in vars(estimator).items():
        if hasattr(attribute, "__array_namespace__") or isinstance(
            attribute, numpy.ndarray
        ):
            attribute = converter(attribute)
        setattr(new_estimator, key, attribute)
    return new_estimator
sklearn/utils/_array_api.py
107
scikit-learn
{ "docstring": "Create new estimator which converting all attributes that are arrays.\n\n Parameters\n ----------\n estimator : Estimator\n Estimator to convert\n\n converter : callable\n Callable that takes an array attribute and returns the converted array.\n\n Returns\n -------\n new_estimator : Estimator\n Convert estimator\n ", "language": "en", "n_whitespaces": 84, "n_words": 39, "vocab_size": 32 }
31
Python
27
2710a9e7eefd2088ce35fd2fb6651d5f97e5ef8b
_array_api.py
261,018
10
67
_estimator_with_converted_arrays
https://github.com/scikit-learn/scikit-learn.git
ENH Adds Array API support to LinearDiscriminantAnalysis (#22554) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]>
89
0
76,626
12
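A hedged usage sketch for the private helper above: convert every array attribute of a fitted estimator with a caller-supplied function. The estimator choice and the float32 converter are illustrative.

import numpy
from sklearn.datasets import load_iris
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.utils._array_api import _estimator_with_converted_arrays

X, y = load_iris(return_X_y=True)
lda = LinearDiscriminantAnalysis().fit(X, y)

# The converter is applied to every ndarray attribute of the fitted estimator.
lda32 = _estimator_with_converted_arrays(lda, lambda a: numpy.asarray(a, dtype=numpy.float32))
print(lda32.coef_.dtype)  # float32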
1
3
def variables(self):
    return self._weights
keras/optimizers/optimizer_v2/optimizer_v2.py
19
keras
{ "docstring": "Returns variables of this Optimizer based on the order created.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
4
Python
4
84afc5193d38057e2e2badf9c889ea87d80d8fbf
optimizer_v2.py
275,487
2
10
variables
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
18
0
81,391
6
1
9
def _get_link_annotation(gc, x, y, width, height):
    link_annotation = {
        'Type': Name('Annot'),
        'Subtype': Name('Link'),
        'Rect': (x, y, x + width, y + height),
        'Border': [0, 0, 0],
        'A': {
            'S': Name('URI'),
            'URI': gc.get_url(),
        },
    }
    return link_annotation
lib/matplotlib/backends/backend_pdf.py
132
matplotlib
{ "docstring": "\n Create a link annotation object for embedding URLs.\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
36
Python
31
02c7ae22b4b1e7cc4fb70e18b208115f438f8f7b
backend_pdf.py
108,704
12
80
_get_link_annotation
https://github.com/matplotlib/matplotlib.git
Refactor URL handling
112
0
23,311
13
6
11
def exclude_from_weight_decay(self, var_list=None, var_names=None):
    if hasattr(self, "_built") and self._built:
        raise ValueError(
            "`exclude_from_weight_decay()` can only be configued before "
            "the optimizer is built."
        )

    if var_list:
        self._exclude_from_weight_decay = [
            self._var_key(variable) for variable in var_list
        ]
    else:
        self._exclude_from_weight_decay = []
    self._exclude_from_weight_decay_names = var_names or []
keras/optimizers/optimizer_experimental/optimizer.py
113
keras
{ "docstring": "Exclude variables from weight decay.\n\n This method must be called before the optimizer's `build` method is\n called. You can set specific variables to exclude out, or set a list of\n strings as the anchor words, if any of which appear in a variable's\n name, then the variable is excluded.\n\n Args:\n var_list: A list of `tf.Variable`s to exclude from weight decay.\n var_names: A list of strings. If any string in `var_names` appear\n in the model variable's name, then this model variable is\n excluded from weight decay. For example, `var_names=['bias']`\n excludes all bias variables from weight decay.\n ", "language": "en", "n_whitespaces": 204, "n_words": 95, "vocab_size": 59 }
43
Python
38
38b618ad90d669c85cccee521ad73cc0630cf750
optimizer.py
280,005
13
67
exclude_from_weight_decay
https://github.com/keras-team/keras.git
Add general `weight_decay` support in optimizer. We still keep adamw optimizer in case people want an explicit adamw. We can delete it in a followup cl. PiperOrigin-RevId: 477043911
178
0
83,214
11
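A usage sketch based on the docstring above; the optimizer class and exact import path depend on the Keras version and are assumptions here.

import tensorflow as tf

opt = tf.keras.optimizers.experimental.AdamW(learning_rate=1e-3, weight_decay=0.004)
# Must be called before the optimizer is built (i.e. before the first update step).
opt.exclude_from_weight_decay(var_names=["bias"])  # skip decay for variables whose name contains "bias"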
12
14
def render(pieces, style):
    if pieces["error"]:
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }

    if not style or style == "default":
        style = "pep440"  # the default

    if style == "pep440":
        rendered = render_pep440(pieces)
    elif style == "pep440-branch":
        rendered = render_pep440_branch(pieces)
    elif style == "pep440-pre":
        rendered = render_pep440_pre(pieces)
    elif style == "pep440-post":
        rendered = render_pep440_post(pieces)
    elif style == "pep440-post-branch":
        rendered = render_pep440_post_branch(pieces)
    elif style == "pep440-old":
        rendered = render_pep440_old(pieces)
    elif style == "git-describe":
        rendered = render_git_describe(pieces)
    elif style == "git-describe-long":
        rendered = render_git_describe_long(pieces)
    else:
        raise ValueError(f"unknown style '{style}'")

    return {
        "version": rendered,
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
pandas/_version.py
347
pandas
{ "docstring": "Render the given version pieces into the requested style.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
105
Python
53
e2df99823758210fb2b7c4aba39e23f3445f7cd3
_version.py
171,634
36
186
render
https://github.com/pandas-dev/pandas.git
BLD: use nonvendor versioneer (#49924) * BLD: remove vendored versioneer * run vis * move config to pyproject.toml * add versioneer to deps * run pyupgrade * fix isort and pylint * fix ci * fix env
322
0
40,701
12
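To illustrate the contract: render() maps the `pieces` dict produced by Versioneer's VCS scan to a small version dict. The field values below are made up, and the exact rendered string depends on the render_pep440* helpers.

pieces = {
    "error": None,
    "long": "e2df99823758210fb2b7c4aba39e23f3445f7cd3",
    "short": "e2df998",
    "closest-tag": "1.5.2",
    "distance": 4,
    "dirty": False,
    "date": "2022-12-05T12:00:00+0000",
}
info = render(pieces, "pep440")
# info["version"] would be something like "1.5.2+4.ge2df998" for the pep440 style;
# "full-revisionid", "dirty" and "date" are copied through from `pieces`.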
3
10
def get_local_agent_user_id(self, webhook_id):
    found_agent_user_id = None
    for agent_user_id, agent_user_data in self._store.agent_user_ids.items():
        if agent_user_data[STORE_GOOGLE_LOCAL_WEBHOOK_ID] == webhook_id:
            found_agent_user_id = agent_user_id
            break

    return found_agent_user_id
homeassistant/components/google_assistant/helpers.py
65
core
{ "docstring": "Return the user ID to be used for actions received via the local SDK.\n\n Return None is no agent user id is found.\n ", "language": "en", "n_whitespaces": 37, "n_words": 23, "vocab_size": 19 }
21
Python
18
25fe213f222f8f49a8126130a8e507fa15e63c83
helpers.py
308,771
7
40
get_local_agent_user_id
https://github.com/home-assistant/core.git
Enable local fulfillment google assistant (#63218) Co-authored-by: Paulus Schoutsen <[email protected]>
90
0
107,509
10
49
8
def factor_nc(expr):
    from sympy.simplify.simplify import powsimp
    from sympy.polys import gcd, factor
sympy/core/exprtools.py
36
sympy
{ "docstring": "Return the factored form of ``expr`` while handling non-commutative\n expressions.\n\n Examples\n ========\n\n >>> from sympy import factor_nc, Symbol\n >>> from sympy.abc import x\n >>> A = Symbol('A', commutative=False)\n >>> B = Symbol('B', commutative=False)\n >>> factor_nc((x**2 + 2*A*x + A**2).expand())\n (x + A)**2\n >>> factor_nc(((x + A)*(x + B)).expand())\n (x + A)*(x + B)\n ", "language": "en", "n_whitespaces": 89, "n_words": 53, "vocab_size": 36 }
11
Python
9
498015021131af4dbb07eb110e5badaba8250c7b
exprtools.py
196,233
132
983
factor_nc
https://github.com/sympy/sympy.git
Updated import locations
20
0
47,733
6
1
5
def on_template_context(self, context, template_name, config):
    return context
mkdocs/plugins.py
22
mkdocs
{ "docstring": "\n The `template_context` event is called immediately after the context is created\n for the subject template and can be used to alter the context for that specific\n template only.\n\n Parameters:\n context: dict of template context variables\n template_name: string filename of template\n config: global configuration object\n\n Returns:\n dict of template context variables\n ", "language": "en", "n_whitespaces": 137, "n_words": 50, "vocab_size": 35 }
7
Python
7
f79b34d174e41084391868e7b503f5c61b8b1bdf
plugins.py
224,448
2
14
on_template_context
https://github.com/mkdocs/mkdocs.git
Move plugin events docs into source code + refactor * Create real (no-op) methods for each event in the base class. * Refactor event dispatcher to not check for methods' existence, instead just call them. * Move documentation from Markdown into docstrings of these methods. * Activate the 'mkdocstrings' plugin. * Use 'mkdocstrings' to insert documentation from those docstrings into the site.
21
0
57,293
6
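A sketch of a plugin overriding this event; the plugin name, target template and added variable are assumptions.

from mkdocs.plugins import BasePlugin

class ExtraContextPlugin(BasePlugin):
    def on_template_context(self, context, template_name, config):
        # Add a variable to the 404 template only; all other templates pass through.
        if template_name == "404.html":
            context["support_email"] = "[email protected]"  # illustrative value
        return context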
7
16
def _download_and_process(self, kaggle_username=None, kaggle_key=None):
    if self.state == DatasetState.NOT_LOADED:
        try:
            self.download(kaggle_username=kaggle_username, kaggle_key=kaggle_key)
        except Exception:
            logger.exception("Failed to download dataset")
    self.verify()
    if self.state == DatasetState.DOWNLOADED:
        # Extract dataset
        try:
            self.extract()
        except Exception:
            logger.exception("Failed to extract dataset")
    if self.state == DatasetState.EXTRACTED:
        # Transform dataset
        try:
            self.transform()
        except Exception:
            logger.exception("Failed to transform dataset")
ludwig/datasets/loaders/dataset_loader.py
175
ludwig
{ "docstring": "Loads the dataset, downloaded and processing it if needed.\n\n If dataset is already processed, does nothing.\n ", "language": "en", "n_whitespaces": 30, "n_words": 16, "vocab_size": 16 }
48
Python
28
e4fc06f986e03919d9aef3ab55c05fee5a6b9d3a
dataset_loader.py
8,071
17
101
_download_and_process
https://github.com/ludwig-ai/ludwig.git
Config-first Datasets API (ludwig.datasets refactor) (#2479) * Adds README and stub for reading dataset configs. * Adds __init__.py for configs, moves circular import into function scope in ludwig/datasets/__init__.py * Print config files in datasets folder. * First pass at automatic archive extraction. * Implemented downloading and extract. * Refactor DatasetConfig into its own file. * Fixed bugs downloading kaggle dataset. * Makes registry store dataset instances, not classes. Also comments out import_submodules for testing. * Typo fix. * Only pass data files on to load_unprocessed_dataframe, symlink directories. * Downloading dataset files into existing directory if exists. * Refactor: make datasets fully config-first, lazy load dataset loaders. * Implemented agnews custom loader. * Implements train/validation/test split by files, and globbing support * Adds _glob_multiple * Adds adult_census_income, agnews, allstate_claims_severity. * Implements sha256 verification, adds more datasets up to creditcard_fraud. * Adds checksums, dbpedia, electricity * Fixes gzip file name returned as string not list, adds up to forest_cover dataset. * Adds datasets up to reuters_r8 * Adds all datasets which don't require a custom class. * Restore dataset import behavior by implementing module __getattr__ * Adds KDD datasets. * Adds ieee_fraud. * Adds imbalanced_insurance, insurance_lite. * Adds mnist. * Completes implementation of all of the built-in datasets. * Made cache_dir optional, read from environment variable if set. * Upgrades datasets tests. * Adds test for new dataset config API. Also adds scripts for dataset link checking. * Fixes loading allstate claims severity dataset. * Use @lru_cache(1), @cache not supported in python < 3.9 * Deletes dataset registry, updates automl test utils * Fix imports of datasets API. * Adds more detail to sha256: docstring and basic README * Copy-paste link oops. * Fixes handling of nested archive types like .tar.bz Also adds a LUDWIG_CACHE and export to the README * Adds link for twitter bots. * Fix order of splits in README.md * typo * Adds verify as a phase in doc string. * Support .pqt, .pq extensions for parquet. * Handle nested archives with longer file extensions like .csv.zip * Handle nested .gz types properly too. Check all extensions with .endswith * Handle all archive types with .endswith * Update ludwig/datasets/loaders/split_loaders.py Co-authored-by: Joppe Geluykens <[email protected]> * Adds explanation for export, fixes preserve_paths (should be relative to processed_dataset_dir) * Resolve preserved paths relative to raw dataset dir before move. * Catch runtime exception from extracting sub-archives. Co-authored-by: Daniel Treiman <[email protected]> Co-authored-by: Joppe Geluykens <[email protected]>
261
0
1,324
13
2
6
def fromkeys(cls, iterable, value=None):
    d = cls()
    for key in iterable:
        d[key] = value
    return d
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/compat.py
48
transferlearning
{ "docstring": "OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S\n and values equal to v (which defaults to None).\n\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 18 }
16
Python
14
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
compat.py
61,896
5
30
fromkeys
https://github.com/jindongwang/transferlearning.git
upd; format
75
0
12,743
9
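Standard usage of this classmethod, shown with collections.OrderedDict, which the vendored backport above mirrors:

from collections import OrderedDict

od = OrderedDict.fromkeys(["a", "b", "c"])
print(od)   # OrderedDict([('a', None), ('b', None), ('c', None)])

od2 = OrderedDict.fromkeys("xy", 0)
print(od2)  # OrderedDict([('x', 0), ('y', 0)])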
1
5
def _get_permission_name(self, action):
    return "%s.%s_%s" % (self.app_label, action, self.model_name)
wagtail/core/permission_policies/base.py
36
wagtail
{ "docstring": "\n Get the full app-label-qualified permission name (as required by\n user.has_perm(...) ) for the given action on this model\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 17 }
9
Python
9
d10f15e55806c6944827d801cd9c2d53f5da4186
base.py
73,922
2
22
_get_permission_name
https://github.com/wagtail/wagtail.git
Reformat with black
23
0
16,177
8
1
7
def get_children(self, request, parent):
    raise NotImplementedError(f'{self.__class__.__name__} must implement get_children()')
netbox/netbox/views/generic/object_views.py
37
netbox
{ "docstring": "\n Return a QuerySet of child objects.\n\n Args:\n request: The current request\n parent: The parent object\n ", "language": "en", "n_whitespaces": 59, "n_words": 15, "vocab_size": 14 }
9
Python
9
c5770392e32aeeaed9bd8dcf907a11c7df352b6c
object_views.py
265,243
2
16
get_children
https://github.com/netbox-community/netbox.git
Refactor ObjectChildrenView
23
0
78,045
11
1
6
def fit(self, X, y=None):
    X = check_array(X, accept_sparse='csr')
    return self
tpot/builtins/feature_transformers.py
40
tpot
{ "docstring": "Do nothing and return the estimator unchanged\n This method is just there to implement the usual API and hence\n work in pipelines.\n Parameters\n ----------\n X : array-like\n ", "language": "en", "n_whitespaces": 69, "n_words": 27, "vocab_size": 25 }
10
Python
10
388616b6247ca4ea8de4e2f340d6206aee523541
feature_transformers.py
181,860
3
24
fit
https://github.com/EpistasisLab/tpot.git
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
31
0
43,629
10
1
7
def get_hidden_input_context(self, name, value, attrs):
    return super().get_context(name, value, attrs)
wagtail/admin/widgets/chooser.py
38
wagtail
{ "docstring": "\n Return the context variables required to render the underlying hidden input element\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 11 }
9
Python
8
0f7a365bf8bf72a4894c1ca447cf52ba67f40b0c
chooser.py
77,527
2
25
get_hidden_input_context
https://github.com/wagtail/wagtail.git
Avoid calling super().render() in BaseChooser This frees us up to redefine template_name and get_context in subclasses without it interfering with the rendering of the hidden input.
23
0
16,669
9
1
15
def test_with_no_current_site(self):
    self.default_site.is_default_site = False
    self.default_site.save()

    start_url = reverse("wagtailsettings:edit", args=["tests", "testsetting"])
    response = self.client.get(
        start_url, follow=True, HTTP_HOST="noneoftheabove.example.com"
    )
    self.assertEqual(302, response.redirect_chain[0][1])
wagtail/contrib/settings/tests/test_admin.py
113
wagtail
{ "docstring": "\n Redirection should not break if the current request does not correspond to a site\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 13 }
20
Python
18
d10f15e55806c6944827d801cd9c2d53f5da4186
test_admin.py
73,489
8
68
test_with_no_current_site
https://github.com/wagtail/wagtail.git
Reformat with black
80
0
16,026
11
2
8
def get_fiscal_year_data(from_fiscal_year, to_fiscal_year):
    fiscal_year = frappe.db.sql(
        ,
        {"from_fiscal_year": from_fiscal_year, "to_fiscal_year": to_fiscal_year},
        as_dict=1,
    )

    return fiscal_year[0] if fiscal_year else {}
erpnext/accounts/report/financial_statements.py
66
erpnext
{ "docstring": "select min(year_start_date) as year_start_date,\n\t\tmax(year_end_date) as year_end_date from `tabFiscal Year` where\n\t\tname between %(from_fiscal_year)s and %(to_fiscal_year)s", "language": "en", "n_whitespaces": 13, "n_words": 16, "vocab_size": 15 }
19
Python
18
494bd9ef78313436f0424b918f200dab8fc7c20b
financial_statements.py
65,238
9
42
get_fiscal_year_data
https://github.com/frappe/erpnext.git
style: format code with black
12
0
13,830
11
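The query string that the extraction moved into the record's documentation field belongs in the empty first argument of frappe.db.sql above; reassembled, the call reads:

fiscal_year = frappe.db.sql(
    """select min(year_start_date) as year_start_date,
    max(year_end_date) as year_end_date from `tabFiscal Year` where
    name between %(from_fiscal_year)s and %(to_fiscal_year)s""",
    {"from_fiscal_year": from_fiscal_year, "to_fiscal_year": to_fiscal_year},
    as_dict=1,
)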
3
12
def _test_Qt_QtWebEngineQuick(pyi_builder, qt_flavor):
    if is_darwin:
        # QtWebEngine on Mac OS only works with a onedir build -- onefile builds do not work.
        # Skip the test execution for onefile builds.
        if pyi_builder._mode != 'onedir':
            pytest.skip('QtWebEngine on macOS is supported only in onedir mode.')

    source = 
        import QtQuick 2.0
        import QtQuick.Window 2.0
        import QtWebEngine 1.0

        Window {{
            visible: true
            WebEngineView {{
                id: view
                anchors.fill: parent
                Component.onCompleted: loadHtml('
                    <!doctype html>
                    <html lang="en">
                    <head>
                        <meta charset="utf-8">
                        <title>Test web page</title>
                    </head>
                    <body>
                        <p>This is a test web page.</p>
                    </body>
                    </html>
                ')
            }}
            Connections {{
                target: view
                function onLoadingChanged(loadRequest) {{
                    if (loadRequest.status !== WebEngineView.LoadStartedStatus) {{
                        Qt.quit()
                    }}
                }}
            }}
        }}
    .format(qt_flavor)
    pyi_builder.test_source(source, **USE_WINDOWED_KWARG)
tests/functional/test_qt.py
93
@requires('PyQt5') @requires('PyQtWebEngine')
pyinstaller
{ "docstring": "\n import sys\n\n from {0}.QtGui import QGuiApplication\n from {0}.QtQml import QQmlApplicationEngine\n\n is_qt6 = '{0}' in {{'PyQt6', 'PySide6'}}\n\n if is_qt6:\n from {0}.QtWebEngineQuick import QtWebEngineQuick\n else:\n from {0}.QtWebEngine import QtWebEngine as QtWebEngineQuick\n QtWebEngineQuick.initialize()\n\n app = QGuiApplication([])\n engine = QQmlApplicationEngine()\n engine.loadData(b)\n\n if not engine.rootObjects():\n sys.exit(-1)\n\n if is_qt6:\n # Qt6: exec_() is deprecated in PySide6 and removed from PyQt6 in favor of exec()\n res = app.exec()\n else:\n res = app.exec_()\n del engine\n sys.exit(res)\n ", "language": "en", "n_whitespaces": 247, "n_words": 68, "vocab_size": 47 }
110
Python
86
947a96a8d2fc80bb76a4492fc9b631d642cf5065
test_qt.py
262,817
66
40
_test_Qt_QtWebEngineQuick
https://github.com/pyinstaller/pyinstaller.git
tests: fixup QtWebEngine Qml/Quick test
762
1
77,384
12